diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index b5dbefef4e..5c84e856d5 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -279,7 +279,6 @@ jobs: - aarch64-unknown-linux-gnu - aarch64_be-unknown-linux-gnu - armv7-unknown-linux-gnueabihf - - arm-unknown-linux-gnueabihf - x86_64-unknown-linux-gnu profile: [dev, release] include: diff --git a/ci/docker/aarch64-unknown-linux-gnu/Dockerfile b/ci/docker/aarch64-unknown-linux-gnu/Dockerfile index 8435dd3ded..e2b3d95585 100644 --- a/ci/docker/aarch64-unknown-linux-gnu/Dockerfile +++ b/ci/docker/aarch64-unknown-linux-gnu/Dockerfile @@ -1,11 +1,9 @@ FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ - g++ \ ca-certificates \ libc6-dev \ gcc-aarch64-linux-gnu \ - g++-aarch64-linux-gnu \ libc6-dev-arm64-cross \ qemu-user \ make \ diff --git a/ci/docker/aarch64_be-unknown-linux-gnu/Dockerfile b/ci/docker/aarch64_be-unknown-linux-gnu/Dockerfile index 0e8efc64bb..d7c12493ad 100644 --- a/ci/docker/aarch64_be-unknown-linux-gnu/Dockerfile +++ b/ci/docker/aarch64_be-unknown-linux-gnu/Dockerfile @@ -2,7 +2,6 @@ FROM ubuntu:25.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ - g++ \ ca-certificates \ libc6-dev \ libc6-dev-arm64-cross \ diff --git a/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile b/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile index 6d4ff24828..23e4d5a341 100644 --- a/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile +++ b/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile @@ -7,7 +7,9 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ libc6-dev-armhf-cross \ qemu-user \ make \ - file + file \ + clang \ + lld ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \ CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_RUNNER="qemu-arm -cpu max -L /usr/arm-linux-gnueabihf" \ OBJDUMP=arm-linux-gnueabihf-objdump diff --git a/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile b/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile index c0a4ed3e70..02744917af 100644 --- a/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile +++ b/ci/docker/armv7-unknown-linux-gnueabihf/Dockerfile @@ -1,11 +1,9 @@ FROM ubuntu:24.04 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ - g++ \ ca-certificates \ libc6-dev \ gcc-arm-linux-gnueabihf \ - g++-arm-linux-gnueabihf \ libc6-dev-armhf-cross \ qemu-user \ make \ diff --git a/ci/docker/x86_64-unknown-linux-gnu/Dockerfile b/ci/docker/x86_64-unknown-linux-gnu/Dockerfile index ca6192a38d..17d1ac67e7 100644 --- a/ci/docker/x86_64-unknown-linux-gnu/Dockerfile +++ b/ci/docker/x86_64-unknown-linux-gnu/Dockerfile @@ -6,11 +6,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ make \ ca-certificates \ wget \ - xz-utils \ - clang \ - libstdc++-14-dev \ - build-essential \ - lld + xz-utils RUN wget http://ci-mirrors.rust-lang.org/sde-external-10.8.0-2026-03-15-lin.tar.xz -O sde.tar.xz RUN mkdir intel-sde diff --git a/ci/intrinsic-test-docker.sh b/ci/intrinsic-test-docker.sh index beeff42c76..948b53dc67 100755 --- a/ci/intrinsic-test-docker.sh +++ b/ci/intrinsic-test-docker.sh @@ -48,7 +48,7 @@ run() { --workdir /checkout \ --privileged \ stdarch \ - sh -c "HOME=/tmp PATH=\$PATH:/rust/bin exec ci/intrinsic-test.sh ${1}" + sh -c "HOME=/tmp PATH=\$PATH:/rust/bin exec ci/intrinsic-test.sh" } if [ -z "$1" ]; then diff --git a/ci/intrinsic-test.sh b/ci/intrinsic-test.sh index 89104e2672..1f3a2caf50 100755 --- a/ci/intrinsic-test.sh +++ 
b/ci/intrinsic-test.sh
@@ -5,127 +5,54 @@ set -ex
 : "${TARGET?The TARGET environment variable must be set.}"
 export RUSTFLAGS="${RUSTFLAGS} -D warnings -Z merge-functions=disabled -Z verify-llvm-ir"
-export HOST_RUSTFLAGS="${RUSTFLAGS}"
 export PROFILE="${PROFILE:="release"}"
-case ${TARGET} in
-    # On 32-bit use a static relocation model which avoids some extra
-    # instructions when dealing with static data, notably allowing some
-    # instruction assertion checks to pass below the 20 instruction limit. If
-    # this is the default, dynamic, then too many instructions are generated
-    # when we assert the instruction for a function and it causes tests to fail.
-    i686-* | i586-*)
-        export RUSTFLAGS="${RUSTFLAGS} -C relocation-model=static"
-        ;;
-    # Some x86_64 targets enable by default more features beyond SSE2,
-    # which cause some instruction assertion checks to fail.
-    x86_64-*)
-        export RUSTFLAGS="${RUSTFLAGS} -C target-feature=-sse3"
-        ;;
-    #Unoptimized build uses fast-isel which breaks with msa
-    mips-* | mipsel-*)
-        export RUSTFLAGS="${RUSTFLAGS} -C llvm-args=-fast-isel=false"
-        ;;
-    armv7-*eabihf | thumbv7-*eabihf)
-        export RUSTFLAGS="${RUSTFLAGS} -Ctarget-feature=+neon"
-        ;;
-    # Some of our test dependencies use the deprecated `gcc` crates which
-    # doesn't detect RISC-V compilers automatically, so do it manually here.
-    riscv*)
-        export RUSTFLAGS="${RUSTFLAGS} -Ctarget-feature=+zk,+zks,+zbb,+zbc"
-        ;;
-esac
-
 echo "RUSTFLAGS=${RUSTFLAGS}"
-echo "OBJDUMP=${OBJDUMP}"
 echo "PROFILE=${PROFILE}"

 INTRINSIC_TEST="--manifest-path=crates/intrinsic-test/Cargo.toml"

-# Test targets compiled with extra features.
+export CC="clang"
+
 case ${TARGET} in
-    # Setup aarch64 & armv7 specific variables, the runner, along with some
-    # tests to skip
-    aarch64-unknown-linux-gnu*)
-        TEST_CPPFLAGS="-fuse-ld=lld -I/usr/aarch64-linux-gnu/include/ -I/usr/aarch64-linux-gnu/include/c++/9/aarch64-linux-gnu/"
-        TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_aarch64.txt
-        TEST_CXX_COMPILER="clang++"
-        TEST_RUNNER="${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER}"
-        : "${TEST_SAMPLE_INTRINSICS_PERCENTAGE:=100}"
+    aarch64_be*)
+        export CFLAGS="-I${AARCH64_BE_TOOLCHAIN}/aarch64_be-none-linux-gnu/libc/usr/include --sysroot=${AARCH64_BE_TOOLCHAIN}/aarch64_be-none-linux-gnu/libc -Wno-nonportable-vector-initialization"
+        TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_aarch64_be.txt
         ;;
-    aarch64_be-unknown-linux-gnu*)
-        TEST_CPPFLAGS="-fuse-ld=lld"
-        TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_aarch64_be.txt
-        TEST_CXX_COMPILER="clang++"
-        TEST_RUNNER="${CARGO_TARGET_AARCH64_BE_UNKNOWN_LINUX_GNU_RUNNER}"
-        : "${TEST_SAMPLE_INTRINSICS_PERCENTAGE:=100}"
+    aarch64*)
+        export CFLAGS="-I/usr/aarch64-linux-gnu/include/"
+        TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_aarch64.txt
         ;;
-    armv7-unknown-linux-gnueabihf*)
-        TEST_CPPFLAGS="-fuse-ld=lld -I/usr/arm-linux-gnueabihf/include/ -I/usr/arm-linux-gnueabihf/include/c++/9/arm-linux-gnueabihf/"
+    armv7*)
+        export CFLAGS="-I/usr/arm-linux-gnueabihf/include/"
         TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_arm.txt
-        TEST_CXX_COMPILER="clang++"
-        TEST_RUNNER="${CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_RUNNER}"
-        : "${TEST_SAMPLE_INTRINSICS_PERCENTAGE:=100}"
         ;;
-    x86_64-unknown-linux-gnu*)
-        TEST_CPPFLAGS="-fuse-ld=lld -I/usr/include/x86_64-linux-gnu/"
-        TEST_CXX_COMPILER="clang++"
-        TEST_RUNNER="${CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER}"
+    x86_64*)
+        export CFLAGS="-I/usr/include/x86_64-linux-gnu/"
         TEST_SKIP_INTRINSICS=crates/intrinsic-test/missing_x86.txt
-        : "${TEST_SAMPLE_INTRINSICS_PERCENTAGE:=20}"
         ;;
     *) ;;
 esac
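(Aside: the `CC` and `CFLAGS` exported above are what the intrinsic-test harness reads when it compiles the C half of each test case. A minimal sketch of that kind of consumption — illustrative Rust, not the crate's actual code; the function name and the naive whitespace split of `CFLAGS` are assumptions:

// Hypothetical helper: invoke the cross C compiler configured by this script
// through the CC/CFLAGS environment variables.
use std::process::Command;

fn compile_c_testcase(src: &str, out: &str) -> std::io::Result<std::process::ExitStatus> {
    let cc = std::env::var("CC").unwrap_or_else(|_| "cc".to_string());
    let cflags = std::env::var("CFLAGS").unwrap_or_default();
    Command::new(cc)
        .args(cflags.split_whitespace()) // naive split; adequate for the flags set above
        .arg(src)
        .arg("-o")
        .arg(out)
        .status()
}

The `-I` and `--sysroot` values matter because the C test programs are compiled against the target's libc headers rather than the host's.)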
-# Arm specific
 case "${TARGET}" in
-    aarch64-unknown-linux-gnu*|armv7-unknown-linux-gnueabihf*)
-        CPPFLAGS="${TEST_CPPFLAGS}" RUSTFLAGS="${HOST_RUSTFLAGS}" RUST_LOG=warn \
-        cargo run "${INTRINSIC_TEST}" --release \
-            --bin intrinsic-test -- intrinsics_data/arm_intrinsics.json \
-            --runner "${TEST_RUNNER}" \
-            --cppcompiler "${TEST_CXX_COMPILER}" \
-            --skip "${TEST_SKIP_INTRINSICS}" \
-            --target "${TARGET}" \
-            --profile "${PROFILE}" \
-            --sample-percentage "${TEST_SAMPLE_INTRINSICS_PERCENTAGE}"
-        ;;
-
-    aarch64_be-unknown-linux-gnu*)
-        CPPFLAGS="${TEST_CPPFLAGS}" RUSTFLAGS="${HOST_RUSTFLAGS}" RUST_LOG=warn \
-        cargo run "${INTRINSIC_TEST}" --release \
-            --bin intrinsic-test -- intrinsics_data/arm_intrinsics.json \
-            --runner "${TEST_RUNNER}" \
-            --cppcompiler "${TEST_CXX_COMPILER}" \
-            --skip "${TEST_SKIP_INTRINSICS}" \
-            --target "${TARGET}" \
-            --profile "${PROFILE}" \
-            --linker "${CARGO_TARGET_AARCH64_BE_UNKNOWN_LINUX_GNU_LINKER}" \
-            --cxx-toolchain-dir "${AARCH64_BE_TOOLCHAIN}" \
-            --sample-percentage "${TEST_SAMPLE_INTRINSICS_PERCENTAGE}"
-        ;;
-
     x86_64-unknown-linux-gnu*)
-        # `CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER` is not necessary for `intrinsic-test`
-        # because the binary needs to run directly on the host.
-        # Hence the use of `env -u`.
         env -u CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER \
-        CPPFLAGS="${TEST_CPPFLAGS}" RUSTFLAGS="${HOST_RUSTFLAGS}" \
-        RUST_LOG=warn RUST_BACKTRACE=1 \
         cargo run "${INTRINSIC_TEST}" --release \
             --bin intrinsic-test -- intrinsics_data/x86-intel.xml \
-            --runner "${TEST_RUNNER}" \
             --skip "${TEST_SKIP_INTRINSICS}" \
-            --cppcompiler "${TEST_CXX_COMPILER}" \
-            --target "${TARGET}" \
-            --profile "${PROFILE}" \
-            --sample-percentage "${TEST_SAMPLE_INTRINSICS_PERCENTAGE}"
+            --target "${TARGET}"
         ;;
-    *)
+    *)
+        cargo run "${INTRINSIC_TEST}" --release \
+            --bin intrinsic-test -- intrinsics_data/arm_intrinsics.json \
+            --skip "${TEST_SKIP_INTRINSICS}" \
+            --target "${TARGET}"
         ;;
 esac
+
+cargo test --manifest-path=rust_programs/Cargo.toml --target "${TARGET}" --profile "${PROFILE}"
diff --git a/crates/core_arch/src/aarch64/neon/generated.rs b/crates/core_arch/src/aarch64/neon/generated.rs
index 3241583cf0..8d701d9b88 100644
--- a/crates/core_arch/src/aarch64/neon/generated.rs
+++ b/crates/core_arch/src/aarch64/neon/generated.rs
@@ -1029,6 +1029,7 @@ pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
 #[doc = "Floating-point complex add"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
 #[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
@@ -1045,8 +1046,33 @@ pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
     unsafe { _vcadd_rot270_f16(a, b) }
 }
 #[doc = "Floating-point complex add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
+#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fcadd))]
+pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
+        )]
+        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
+    }
+    unsafe {
+        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: float16x4_t = _vcadd_rot270_f16(a, b);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
+}
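(Aside: every big-endian stub added in generated.rs follows the shape of `vcadd_rot270_f16` above: reverse the lanes of each vector argument with `simd_shuffle!`, call the LLVM intrinsic, which assumes little-endian lane numbering, then reverse the lanes of the result. A standalone sketch of why the round-trip is sound, on plain arrays rather than SIMD types:

// A reversing shuffle such as [3, 2, 1, 0] is an involution: applying it twice
// restores the original lane order, so reverse/operate/reverse preserves the
// per-lane semantics seen by a big-endian caller.
fn reverse_lanes(v: [f32; 4]) -> [f32; 4] {
    [v[3], v[2], v[1], v[0]]
}

fn main() {
    let a = [1.0_f32, 2.0, 3.0, 4.0];
    assert_eq!(reverse_lanes(reverse_lanes(a)), a);
}

The same idea scales to the [1, 0] and [7, 6, 5, 4, 3, 2, 1, 0] shuffles used for the 2- and 8-lane types below.)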
+#[doc = "Floating-point complex add"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
 #[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
@@ -1063,8 +1089,33 @@ pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
     unsafe { _vcaddq_rot270_f16(a, b) }
 }
 #[doc = "Floating-point complex add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
+#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fcadd))]
+pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
+        )]
+        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
+    }
+    unsafe {
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: float16x8_t = _vcaddq_rot270_f16(a, b);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
+}
+#[doc = "Floating-point complex add"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fcma")]
 #[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
 #[cfg_attr(test, assert_instr(fcadd))]
@@ -1079,8 +1130,31 @@ pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     unsafe { _vcadd_rot270_f32(a, b) }
 }
 #[doc = "Floating-point complex add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fcma")]
+#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
+#[cfg_attr(test, assert_instr(fcadd))]
+pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
+        )]
+        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
+    }
+    unsafe {
+        let a: float32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: float32x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: float32x2_t = _vcadd_rot270_f32(a, b);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Floating-point complex add"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
 #[inline]
+#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -1095,8 +1169,31 @@ pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe { _vcaddq_rot270_f32(a, b) } } #[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32" + )] + fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float32x4_t = _vcaddq_rot270_f32(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -1111,8 +1208,31 @@ pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { unsafe { _vcaddq_rot270_f64(a, b) } } #[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64" + )] + fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float64x2_t = _vcaddq_rot270_f64(a, b); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -1129,8 +1249,33 @@ pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { _vcadd_rot90_f16(a, b) } } #[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(fcadd))] +pub fn vcadd_rot90_f16(a: float16x4_t, b: 
float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16" + )] + fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float16x4_t = _vcadd_rot90_f16(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -1147,8 +1292,33 @@ pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { _vcaddq_rot90_f16(a, b) } } #[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(fcadd))] +pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16" + )] + fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + } + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = _vcaddq_rot90_f16(a, b); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -1163,8 +1333,31 @@ pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unsafe { _vcadd_rot90_f32(a, b) } } #[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32" + )] + fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float32x2_t = _vcadd_rot90_f32(a, b); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"] #[inline] 
+#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -1179,8 +1372,31 @@ pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe { _vcaddq_rot90_f32(a, b) } } #[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32" + )] + fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float32x4_t = _vcaddq_rot90_f32(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -1194,6 +1410,28 @@ pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } unsafe { _vcaddq_rot90_f64(a, b) } } +#[doc = "Floating-point complex add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcadd))] +pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64" + )] + fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float64x2_t = _vcaddq_rot90_f64(a, b); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"] #[inline] @@ -2905,6 +3143,7 @@ pub fn vcltzh_f16(a: f16) -> u16 { #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -2921,58 +3160,158 @@ pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t unsafe { _vcmla_f16(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"] 
#[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] -pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { +pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16" + link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16" )] - fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t; + fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t; + } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let c: float16x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]); + let ret_val: float16x4_t = _vcmla_f16(a, b, c); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vcmlaq_f16(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] +#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] -pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { +pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32" + link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16" )] - fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t; } - unsafe { _vcmla_f32(a, b, c) } + unsafe { _vcmlaq_f16(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,fcma")] +#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] -pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { +pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32" + link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16" )] - fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t; + } + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: float16x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); 
+ let ret_val: float16x8_t = _vcmlaq_f16(a, b, c); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32" + )] + fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + unsafe { _vcmla_f32(a, b, c) } +} +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32" + )] + fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let c: float32x2_t = simd_shuffle!(c, c, [1, 0]); + let ret_val: float32x2_t = _vcmla_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32" + )] + fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; } unsafe { _vcmlaq_f32(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32" + )] + fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let c: float32x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]); + let ret_val: float32x4_t = _vcmlaq_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -2987,6 +3326,29 @@ pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t unsafe { _vcmlaq_f64(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64" + )] + fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float64x2_t = simd_shuffle!(b, b, [1, 0]); + let c: float64x2_t = simd_shuffle!(c, c, [1, 0]); + let ret_val: float64x2_t = _vcmlaq_f64(a, b, c); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"] #[inline] #[target_feature(enable = "neon,fcma")] @@ -3141,6 +3503,7 @@ pub fn vcmlaq_laneq_f32( #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -3157,8 +3520,34 @@ pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float unsafe { _vcmla_rot180_f16(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16" + )] + fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t; + } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let c: float16x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]); + let ret_val: float16x4_t = _vcmla_rot180_f16(a, b, c); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", 
issue = "117222")] @@ -3175,8 +3564,34 @@ pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> floa unsafe { _vcmlaq_rot180_f16(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16" + )] + fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t; + } + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: float16x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = _vcmlaq_rot180_f16(a, b, c); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -3191,8 +3606,32 @@ pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float unsafe { _vcmla_rot180_f32(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32" + )] + fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let c: float32x2_t = simd_shuffle!(c, c, [1, 0]); + let ret_val: float32x2_t = _vcmla_rot180_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -3207,8 +3646,32 @@ pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> floa unsafe { _vcmlaq_rot180_f32(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = 
"stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32" + )] + fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let c: float32x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]); + let ret_val: float32x4_t = _vcmlaq_rot180_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -3223,6 +3686,29 @@ pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> floa unsafe { _vcmlaq_rot180_f64(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64" + )] + fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float64x2_t = simd_shuffle!(b, b, [1, 0]); + let c: float64x2_t = simd_shuffle!(c, c, [1, 0]); + let ret_val: float64x2_t = _vcmlaq_rot180_f64(a, b, c); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"] #[inline] #[target_feature(enable = "neon,fcma")] @@ -3377,6 +3863,7 @@ pub fn vcmlaq_rot180_laneq_f32( #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -3393,62 +3880,162 @@ pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float unsafe { _vcmla_rot270_f16(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] -pub fn 
vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { +pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16" + link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16" )] - fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t; + fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t; + } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let c: float16x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]); + let ret_val: float16x4_t = _vcmla_rot270_f16(a, b, c); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vcmlaq_rot270_f16(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] +#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] -pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { +pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32" + link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16" )] - fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t; } - unsafe { _vcmla_rot270_f32(a, b, c) } + unsafe { _vcmlaq_rot270_f16(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,fcma")] +#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fcmla))] -pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { +pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32" + link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16" )] - fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t; + } + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: float16x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = _vcmlaq_rot270_f16(a, b, c); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 
0]) } - unsafe { _vcmlaq_rot270_f32(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] -pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { +pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32" + )] + fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + unsafe { _vcmla_rot270_f32(a, b, c) } +} +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32" + )] + fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; + } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let c: float32x2_t = simd_shuffle!(c, c, [1, 0]); + let ret_val: float32x2_t = _vcmla_rot270_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32" + )] + fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + unsafe { _vcmlaq_rot270_f32(a, b, c) } +} +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32" + )] + fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; + } + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 
0]); + let c: float32x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]); + let ret_val: float32x4_t = _vcmlaq_rot270_f32(a, b, c); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -3459,6 +4046,29 @@ pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> floa unsafe { _vcmlaq_rot270_f64(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64" + )] + fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; + } + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float64x2_t = simd_shuffle!(b, b, [1, 0]); + let c: float64x2_t = simd_shuffle!(c, c, [1, 0]); + let ret_val: float64x2_t = _vcmlaq_rot270_f64(a, b, c); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"] #[inline] #[target_feature(enable = "neon,fcma")] @@ -3613,6 +4223,7 @@ pub fn vcmlaq_rot270_laneq_f32( #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -3629,8 +4240,34 @@ pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float1 unsafe { _vcmla_rot90_f16(a, b, c) } } #[doc = "Floating-point complex multiply accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fcma")] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(fcmla))] +pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16" + )] + fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t; + } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let c: float16x4_t 
= simd_shuffle!(c, c, [3, 2, 1, 0]);
+        let ret_val: float16x4_t = _vcmla_rot90_f16(a, b, c);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
+}
+#[doc = "Floating-point complex multiply accumulate"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fcma")]
 #[target_feature(enable = "neon,fp16")]
 #[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
@@ -3647,8 +4284,34 @@ pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float
     unsafe { _vcmlaq_rot90_f16(a, b, c) }
 }
 #[doc = "Floating-point complex multiply accumulate"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fcma")]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fcmla))]
+pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
+        )]
+        fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
+    }
+    unsafe {
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let c: float16x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: float16x8_t = _vcmlaq_rot90_f16(a, b, c);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
+}
+#[doc = "Floating-point complex multiply accumulate"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fcma")]
 #[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
 #[cfg_attr(test, assert_instr(fcmla))]
@@ -3663,8 +4326,32 @@ pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3
     unsafe { _vcmla_rot90_f32(a, b, c) }
 }
 #[doc = "Floating-point complex multiply accumulate"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fcma")]
+#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
+#[cfg_attr(test, assert_instr(fcmla))]
+pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
+        )]
+        fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
+    }
+    unsafe {
+        let a: float32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: float32x2_t = simd_shuffle!(b, b, [1, 0]);
+        let c: float32x2_t = simd_shuffle!(c, c, [1, 0]);
+        let ret_val: float32x2_t = _vcmla_rot90_f32(a, b, c);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Floating-point complex multiply accumulate"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fcma")]
 #[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
 #[cfg_attr(test, assert_instr(fcmla))]
@@ -3679,8 +4366,32 @@ pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float
     unsafe { _vcmlaq_rot90_f32(a, b, c) }
 }
 #[doc = "Floating-point complex multiply accumulate"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fcma")]
+#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
+#[cfg_attr(test, assert_instr(fcmla))]
+pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
+        )]
+        fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
+    }
+    unsafe {
+        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let c: float32x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]);
+        let ret_val: float32x4_t = _vcmlaq_rot90_f32(a, b, c);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
+}
+#[doc = "Floating-point complex multiply accumulate"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fcma")]
 #[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
 #[cfg_attr(test, assert_instr(fcmla))]
@@ -3695,6 +4406,29 @@ pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float
     unsafe { _vcmlaq_rot90_f64(a, b, c) }
 }
 #[doc = "Floating-point complex multiply accumulate"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fcma")]
+#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
+#[cfg_attr(test, assert_instr(fcmla))]
+pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
+        )]
+        fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
+    }
+    unsafe {
+        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: float64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let c: float64x2_t = simd_shuffle!(c, c, [1, 0]);
+        let ret_val: float64x2_t = _vcmlaq_rot90_f64(a, b, c);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Floating-point complex multiply accumulate"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
 #[inline]
 #[target_feature(enable = "neon,fcma")]
@@ -3849,12 +4583,26 @@ pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
 #[doc = "Join two smaller vectors into a single larger vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(mov))]
 pub fn vcombine_f64(a: float64x1_t, b: float64x1_t) -> float64x2_t {
     unsafe { simd_shuffle!(a, b, [0, 1]) }
 }
+#[doc = "Join two smaller vectors into a single larger vector"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn vcombine_f64(a: float64x1_t, b: float64x1_t) -> float64x2_t {
+    unsafe {
+        let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 1]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
 #[doc = "Insert vector element from another vector element"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
 #[inline]
@@ -4356,6 +5104,7 @@ pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
 #[doc = "Insert vector element from another vector element"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(
     all(test, target_endian = "little"),
@@ -4373,6 +5122,31 @@ pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
     unsafe { simd_insert!(a, LANE1 as u32, simd_extract!(b, LANE2 as u32, p64)) }
 }
 #[doc = "Insert vector element from another vector element"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(
+    all(test, target_endian = "little"),
+    assert_instr(mov, LANE1 = 1, LANE2 = 0)
+)]
+#[rustc_legacy_const_generics(1, 3)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
+    a: poly64x2_t,
+    b: poly64x1_t,
+) -> poly64x2_t {
+    static_assert_uimm_bits!(LANE1, 1);
+    static_assert!(LANE2 == 0);
+    unsafe {
+        let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: poly64x2_t = vcombine_p64(b, b);
+        let ret_val: poly64x2_t =
+            simd_insert!(a, LANE1 as u32, simd_extract!(b, LANE2 as u32, p64));
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Insert vector element from another vector element"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
@@ -4740,6 +5514,7 @@ pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
 #[doc = "Insert vector element from another vector element"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(
     all(test, target_endian = "little"),
@@ -4756,17 +5531,42 @@ pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
     unsafe { simd_insert!(a, LANE1 as u32, simd_extract!(b, LANE2 as u32, p64)) }
 }
 #[doc = "Insert vector element from another vector element"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(nop))]
+#[cfg_attr(
+    all(test, target_endian = "little"),
+    assert_instr(mov, LANE1 = 0, LANE2 = 0)
+)]
+#[rustc_legacy_const_generics(1, 3)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vcreate_f64(a: u64) -> float64x1_t {
-    unsafe { transmute(a) }
-}
-#[doc = "Floating-point convert"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
-#[inline]
+pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
+    a: poly64x2_t,
+    b: poly64x2_t,
+) -> poly64x2_t {
+    static_assert_uimm_bits!(LANE1, 1);
+    static_assert_uimm_bits!(LANE2, 1);
+    unsafe {
+        let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: poly64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: poly64x2_t =
+            simd_insert!(a, LANE1 as u32, simd_extract!(b, LANE2 as u32, p64));
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Insert vector element from another vector element"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(nop))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vcreate_f64(a: u64) -> float64x1_t {
+    unsafe { transmute(a) }
+}
+#[doc = "Floating-point convert"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
+#[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(all(test, target_endian = "little"), assert_instr(fcvtn))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -7351,6 +8151,7 @@ pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
 #[doc = "Set all vector lanes to the same value"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(dup, N = 0))]
 #[rustc_legacy_const_generics(1)]
@@ -7360,8 +8161,24 @@ pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
     unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
 }
 #[doc = "Set all vector lanes to the same value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(dup, N = 0))]
+#[rustc_legacy_const_generics(1)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
+    static_assert!(N == 0);
+    unsafe {
+        let ret_val: float64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Set all vector lanes to the same value"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(dup, N = 0))]
 #[rustc_legacy_const_generics(1)]
@@ -7371,8 +8188,24 @@ pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
     unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
 }
 #[doc = "Set all vector lanes to the same value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(dup, N = 0))]
+#[rustc_legacy_const_generics(1)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
+    static_assert!(N == 0);
+    unsafe {
+        let ret_val: poly64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Set all vector lanes to the same value"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(dup, N = 1))]
 #[rustc_legacy_const_generics(1)]
@@ -7382,8 +8215,25 @@ pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
     unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
 }
 #[doc = "Set all vector lanes to the same value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(dup, N = 1))]
+#[rustc_legacy_const_generics(1)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe {
+        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let ret_val: float64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Set all vector lanes to the same value"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(dup, N = 1))]
 #[rustc_legacy_const_generics(1)]
@@ -7393,6 +8243,22 @@ pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
     unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
 }
 #[doc = "Set all vector lanes to the same value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(dup, N = 1))]
+#[rustc_legacy_const_generics(1)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe {
+        let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let ret_val: poly64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Set all vector lanes to the same value"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
 #[inline]
 #[target_feature(enable = "neon")]
@@ -7655,6 +8521,7 @@ pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
 #[doc = "Extract vector from pair of vectors"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(ext, N = 1))]
 #[rustc_legacy_const_generics(2)]
@@ -7664,8 +8531,26 @@ pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1]) }
 }
 #[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(ext, N = 1))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe {
+        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: float64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: float64x2_t = simd_shuffle!(a, b, [N as u32, N as u32 + 1]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Extract vector from pair of vectors"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(ext, N = 1))]
 #[rustc_legacy_const_generics(2)]
@@ -7674,6 +8559,23 @@ pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
     static_assert_uimm_bits!(N, 1);
     unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1]) }
 }
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(ext, N = 1))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe {
+        let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: poly64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: poly64x2_t = simd_shuffle!(a, b, [N as u32, N as u32 + 1]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
 #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
 #[inline]
@@ -8001,6 +8903,7 @@ pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
 #[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
@@ -8017,8 +8920,34 @@ pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float3
     unsafe { _vfmlal_high_f16(r, a, b) }
 }
 #[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fmlal2))]
+pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
+        )]
+        fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
+    }
+    unsafe {
+        let r: float32x2_t = simd_shuffle!(r, r, [1, 0]);
+        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: float32x2_t = _vfmlal_high_f16(r, a, b);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
@@ -8034,6 +8963,31 @@ pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float
     }
     unsafe { _vfmlalq_high_f16(r, a, b) }
 }
+#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fmlal2))]
+pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
+        )]
+        fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
+    }
+    unsafe {
+        let r: float32x4_t = simd_shuffle!(r, r, [3, 2, 1, 0]);
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: float32x4_t = _vfmlalq_high_f16(r, a, b);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
+}
 #[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
 #[inline]
@@ -8173,6 +9127,7 @@ pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
 #[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
@@ -8189,8 +9144,34 @@ pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32
     unsafe { _vfmlal_low_f16(r, a, b) }
 }
 #[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fmlal))]
+pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
+        )]
+        fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
+    }
+    unsafe {
+        let r: float32x2_t = simd_shuffle!(r, r, [1, 0]);
+        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: float32x2_t = _vfmlal_low_f16(r, a, b);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
@@ -8206,9 +9187,35 @@ pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float3
     }
     unsafe { _vfmlalq_low_f16(r, a, b) }
 }
+#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fmlal))]
+pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
+        )]
+        fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
+    }
+    unsafe {
+        let r: float32x4_t = simd_shuffle!(r, r, [3, 2, 1, 0]);
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: float32x4_t = _vfmlalq_low_f16(r, a, b);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
+}
 #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
@@ -8225,8 +9232,34 @@ pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float3
     unsafe { _vfmlsl_high_f16(r, a, b) }
 }
 #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fmlsl2))]
+pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
+        )]
+        fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
+    }
+    unsafe {
+        let r: float32x2_t = simd_shuffle!(r, r, [1, 0]);
+        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: float32x2_t = _vfmlsl_high_f16(r, a, b);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
@@ -8242,6 +9275,31 @@ pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float
     }
     unsafe { _vfmlslq_high_f16(r, a, b) }
 }
+#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fmlsl2))]
+pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
+        )]
+        fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
+    }
+    unsafe {
+        let r: float32x4_t = simd_shuffle!(r, r, [3, 2, 1, 0]);
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: float32x4_t = _vfmlslq_high_f16(r, a, b);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
+}
 #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
 #[inline]
@@ -8381,6 +9439,7 @@ pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
 #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
@@ -8397,8 +9456,34 @@ pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32
     unsafe { _vfmlsl_low_f16(r, a, b) }
 }
 #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fmlsl))]
+pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
+        )]
+        fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
+    }
+    unsafe {
+        let r: float32x2_t = simd_shuffle!(r, r, [1, 0]);
+        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: float32x2_t = _vfmlsl_low_f16(r, a, b);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
@@ -8414,6 +9499,31 @@ pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float3
     }
     unsafe { _vfmlslq_low_f16(r, a, b) }
 }
+#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fmlsl))]
+pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
+        )]
+        fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
+    }
+    unsafe {
+        let r: float32x4_t = simd_shuffle!(r, r, [3, 2, 1, 0]);
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: float32x4_t = _vfmlslq_low_f16(r, a, b);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
+}
 #[doc = "Floating-point fused multiply-subtract from accumulator"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
 #[inline]
@@ -8739,15 +9849,30 @@ pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
 #[doc = "Duplicate vector element to vector or scalar"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(fmov))]
+#[cfg_attr(test, assert_instr(nop))]
 pub fn vget_high_f64(a: float64x2_t) -> float64x1_t {
     unsafe { float64x1_t([simd_extract!(a, 1)]) }
 }
 #[doc = "Duplicate vector element to vector or scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(nop))]
+pub fn vget_high_f64(a: float64x2_t) -> float64x1_t {
+    unsafe {
+        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
+        float64x1_t([simd_extract!(a, 1)])
+    }
+}
+#[doc = "Duplicate vector element to vector or scalar"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
@@ -8755,8 +9880,22 @@ pub fn vget_low_f64(a: float64x2_t) -> float64x1_t {
     unsafe { float64x1_t([simd_extract!(a, 0)]) }
 }
 #[doc = "Duplicate vector element to vector or scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(nop))]
+pub fn vget_low_f64(a: float64x2_t) -> float64x1_t {
+    unsafe {
+        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
+        float64x1_t([simd_extract!(a, 0)])
+    }
+}
+#[doc = "Duplicate vector element to vector or scalar"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[rustc_legacy_const_generics(1)]
@@ -8765,6 +9904,21 @@ pub fn vgetq_lane_f64<const IMM5: i32>(a: float64x2_t) -> f64 {
     static_assert_uimm_bits!(IMM5, 1);
     unsafe { simd_extract!(a, IMM5 as u32) }
 }
+#[doc = "Duplicate vector element to vector or scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[rustc_legacy_const_generics(1)]
+#[cfg_attr(test, assert_instr(nop, IMM5 = 0))]
+pub fn vgetq_lane_f64<const IMM5: i32>(a: float64x2_t) -> f64 {
+    static_assert_uimm_bits!(IMM5, 1);
+    unsafe {
+        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
+        simd_extract!(a, IMM5 as u32)
+    }
+}
 #[doc = "Load multiple single-element structures to one, two, three, or four registers"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
 #[doc = "## Safety"]
@@ -13090,6 +14244,7 @@ pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
 #[doc = "Floating-point add pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
 #[cfg(not(target_arch = "arm64ec"))]
@@ -13102,8 +14257,27 @@ pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
     }
 }
 #[doc = "Floating-point add pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(faddp))]
+pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
+    unsafe {
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
+        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
+        let ret_val: float16x8_t = simd_add(even, odd);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
+}
+#[doc = "Floating-point add pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(faddp))]
@@ -13115,8 +14289,26 @@ pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     }
 }
 #[doc = "Floating-point add pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(faddp))]
+pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
+    unsafe {
+        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
+        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
+        let ret_val: float32x4_t = simd_add(even, odd);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
+}
+#[doc = "Floating-point add pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(faddp))]
@@ -13127,9 +14319,27 @@ pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
         simd_add(even, odd)
     }
 }
+#[doc = "Floating-point add pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(faddp))]
+pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
+    unsafe {
+        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: float64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
+        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
+        let ret_val: float64x2_t = simd_add(even, odd);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
 #[doc = "Add Pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
@@ -13141,8 +14351,32 @@ pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     }
 }
 #[doc = "Add Pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    unsafe {
+        let a: int8x16_t =
+            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: int8x16_t =
+            simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<16>());
+        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<16>());
+        let ret_val: int8x16_t = simd_add(even, odd);
+        simd_shuffle!(
+            ret_val,
+            ret_val,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        )
+    }
+}
+#[doc = "Add Pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
@@ -13154,8 +14388,26 @@ pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     }
 }
 #[doc = "Add Pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+    unsafe {
+        let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
+        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
+        let ret_val: int16x8_t = simd_add(even, odd);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
+}
+#[doc = "Add Pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
@@ -13167,8 +14419,26 @@ pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     }
 }
 #[doc = "Add Pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
+    unsafe {
+        let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
+        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
+        let ret_val: int32x4_t = simd_add(even, odd);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
+}
+#[doc = "Add Pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
@@ -13180,8 +14450,26 @@ pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     }
 }
 #[doc = "Add Pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
+    unsafe {
+        let a: int64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: int64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
+        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
+        let ret_val: int64x2_t = simd_add(even, odd);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "Add Pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
@@ -13193,8 +14481,32 @@ pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     }
 }
 #[doc = "Add Pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
+    unsafe {
+        let a: uint8x16_t =
+            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: uint8x16_t =
+            simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<16>());
+        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<16>());
+        let ret_val: uint8x16_t = simd_add(even, odd);
+        simd_shuffle!(
+            ret_val,
+            ret_val,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        )
+    }
+}
+#[doc = "Add Pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
@@ -13206,8 +14518,26 @@ pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     }
 }
 #[doc = "Add Pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
+    unsafe {
+        let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
+        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
+        let ret_val: uint16x8_t = simd_add(even, odd);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
+}
+#[doc = "Add Pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
@@ -13219,21 +14549,57 @@ pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     }
 }
 #[doc = "Add Pairwise"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
-pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     unsafe {
-        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
-        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
-        simd_add(even, odd)
+        let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
+        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
+        let ret_val: uint32x4_t = simd_add(even, odd);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
     }
 }
-#[doc = "Floating-point add pairwise"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
+#[doc = "Add Pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
+#[inline]
+#[cfg(target_endian = "little")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    unsafe {
+        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
+        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
+        simd_add(even, odd)
+    }
+}
+#[doc = "Add Pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
 #[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    unsafe {
+        let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
+        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
+        let ret_val: uint64x2_t = simd_add(even, odd);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
+}
+#[doc = "Floating-point add pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
+#[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
 #[cfg(not(target_arch = "arm64ec"))]
@@ -13249,8 +14615,32 @@ pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
     unsafe { _vpmax_f16(a, b) }
 }
 #[doc = "Floating-point add pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fmaxp))]
+pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmaxp.v4f16"
+        )]
+        fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
+    }
+    unsafe {
+        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: float16x4_t = _vpmax_f16(a, b);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
+}
+#[doc = "Floating-point add pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
 #[cfg(not(target_arch = "arm64ec"))]
@@ -13266,8 +14656,32 @@ pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
     unsafe { _vpmaxq_f16(a, b) }
 }
 #[doc = "Floating-point add pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fmaxp))]
+pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmaxp.v8f16"
+        )]
+        fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
+    }
+    unsafe {
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: float16x8_t = _vpmaxq_f16(a, b);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
+}
+#[doc = "Floating-point add pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
 #[cfg(not(target_arch = "arm64ec"))]
@@ -13283,8 +14697,32 @@ pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
     unsafe { _vpmaxnm_f16(a, b) }
 }
 #[doc = "Floating-point add pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fmaxnmp))]
+pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
+        )]
+        fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
+    }
+    unsafe {
+        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: float16x4_t = _vpmaxnm_f16(a, b);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
+}
+#[doc = "Floating-point add pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
 #[cfg(not(target_arch = "arm64ec"))]
@@ -13299,9 +14737,33 @@ pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
     }
     unsafe { _vpmaxnmq_f16(a, b) }
 }
+#[doc = "Floating-point add pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(fmaxnmp))]
+pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
+        )]
+        fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
+    }
+    unsafe {
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: float16x8_t = _vpmaxnmq_f16(a, b);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
+}
 #[doc = "Floating-point Maximum Number Pairwise (vector)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fmaxnmp))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13316,8 +14778,31 @@ pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     unsafe { _vpmaxnm_f32(a, b) }
 }
 #[doc = "Floating-point Maximum Number Pairwise (vector)."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(fmaxnmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
+        )]
+        fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
+    }
+    unsafe {
+        let a: float32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: float32x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: float32x2_t = _vpmaxnm_f32(a, b);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
+}
+#[doc = "Floating-point Maximum Number Pairwise (vector)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fmaxnmp))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13332,8 +14817,31 @@ pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     unsafe { _vpmaxnmq_f32(a, b) }
 }
 #[doc = "Floating-point Maximum Number Pairwise (vector)."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(fmaxnmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
+        )]
+        fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
+    }
+    unsafe {
+        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: float32x4_t = _vpmaxnmq_f32(a, b);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
+}
+#[doc = "Floating-point Maximum Number Pairwise (vector)."]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fmaxnmp))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13347,9 +14855,32 @@ pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     }
     unsafe { _vpmaxnmq_f64(a, b) }
 }
+#[doc = "Floating-point Maximum Number Pairwise (vector)."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(fmaxnmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
+        )]
+        fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
+    }
+    unsafe {
+        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: float64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: float64x2_t = _vpmaxnmq_f64(a, b);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
+}
 #[doc = "Floating-point maximum number pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fmaxnmp))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13364,8 +14895,29 @@ pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
     unsafe { _vpmaxnmqd_f64(a) }
 }
 #[doc = "Floating-point maximum number pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(fmaxnmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
+        )]
+        fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
+    }
+    unsafe {
+        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
+        _vpmaxnmqd_f64(a)
+    }
+}
+#[doc = "Floating-point maximum number pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(test, assert_instr(fmaxnmp))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
@@ -13379,9 +14931,30 @@ pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
     }
     unsafe { _vpmaxnms_f32(a) }
 }
+#[doc = "Floating-point maximum number pairwise"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(fmaxnmp))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
+        )]
+        fn _vpmaxnms_f32(a: float32x2_t) -> f32;
+    }
+    unsafe {
+        let a: float32x2_t = simd_shuffle!(a, a, [1, 0]);
+        _vpmaxnms_f32(a)
+    }
+}
 #[doc = "Folding maximum of adjacent pairs"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(fmaxp))]
@@ -13396,44 +14969,119 @@ pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     unsafe { _vpmaxq_f32(a, b) }
 }
 #[doc = "Folding maximum of adjacent pairs"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(fmaxp))]
-pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
+pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
+            link_name = "llvm.aarch64.neon.fmaxp.v4f32"
         )]
-        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
+        fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
+    }
+    unsafe {
+        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: float32x4_t = _vpmaxq_f32(a, b);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
     }
-    unsafe { _vpmaxq_f64(a, b) }
 }
 #[doc = "Folding maximum of adjacent pairs"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(smaxp))]
-pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+#[cfg_attr(test, assert_instr(fmaxp))]
+pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.smaxp.v16i8"
+            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
         )]
-        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
+        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
     }
-    unsafe { _vpmaxq_s8(a, b) }
+    unsafe { _vpmaxq_f64(a, b) }
 }
 #[doc = "Folding maximum of adjacent pairs"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(smaxp))]
-pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+#[cfg_attr(test, assert_instr(fmaxp))]
+pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
+        )]
+        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
+    }
+    unsafe {
+        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: float64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: float64x2_t = _vpmaxq_f64(a, b);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
+}
+#[doc = "Folding maximum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
+#[inline]
+#[cfg(target_endian = "little")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.smaxp.v16i8"
+        )]
+        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
+    }
+    unsafe { _vpmaxq_s8(a, b) }
+}
+#[doc = "Folding maximum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.smaxp.v16i8"
+        )]
+        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
+    }
+    unsafe {
+        let a: int8x16_t =
+            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: int8x16_t =
+            simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: int8x16_t = _vpmaxq_s8(a, b);
+        simd_shuffle!(
+            ret_val,
+            ret_val,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        )
+    }
+}
+#[doc = "Folding maximum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
+#[inline]
+#[cfg(target_endian = "little")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
@@ -13444,8 +15092,31 @@ pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     unsafe { _vpmaxq_s16(a, b) }
 }
 #[doc = "Folding maximum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.smaxp.v8i16"
+        )]
+        fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
+    }
+    unsafe {
+        let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: int16x8_t = _vpmaxq_s16(a, b);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
+}
+#[doc = "Folding maximum of adjacent pairs"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(smaxp))]
@@ -13460,8 +15131,31 @@ pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     unsafe { _vpmaxq_s32(a, b) }
 }
 #[doc = "Folding maximum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.smaxp.v4i32"
+        )]
+        fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
+    }
+    unsafe {
+        let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: int32x4_t = _vpmaxq_s32(a, b);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
+}
+#[doc = "Folding maximum of adjacent pairs"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(umaxp))]
@@ -13476,8 +15170,37 @@ pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     unsafe { _vpmaxq_u8(a, b) }
 }
 #[doc = "Folding maximum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(umaxp))]
+pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.umaxp.v16i8"
+        )]
+        fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
+    unsafe {
+        let a: uint8x16_t =
+            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: uint8x16_t =
+            simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: uint8x16_t = _vpmaxq_u8(a, b);
+        simd_shuffle!(
+            ret_val,
+            ret_val,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        )
+    }
+}
+#[doc = "Folding maximum of adjacent pairs"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(umaxp))]
@@ -13492,8 +15215,31 @@ pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     unsafe { _vpmaxq_u16(a, b) }
 }
 #[doc = "Folding maximum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(umaxp))]
+pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.umaxp.v8i16"
+        )]
+        fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
+    }
+    unsafe {
+        let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: uint16x8_t = _vpmaxq_u16(a, b);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
+}
+#[doc = "Folding maximum of adjacent pairs"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(umaxp))]
@@ -13507,6 +15253,28 @@ pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     }
     unsafe { _vpmaxq_u32(a, b) }
 }
+#[doc = "Folding maximum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(umaxp))]
+pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.umaxp.v4i32"
+        )]
+        fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
+    unsafe {
+        let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: uint32x4_t = _vpmaxq_u32(a, b);
+        simd_shuffle!(ret_val, ret_val, 
[3, 2, 1, 0]) + } +} #[doc = "Floating-point maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"] #[inline] @@ -13542,6 +15310,7 @@ pub fn vpmaxs_f32(a: float32x2_t) -> f32 { #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -13557,8 +15326,32 @@ pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { _vpmin_f16(a, b) } } #[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(fminp))] +pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v4f16" + )] + fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float16x4_t = _vpmin_f16(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -13574,8 +15367,32 @@ pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe { _vpminq_f16(a, b) } } #[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(fminp))] +pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v8f16" + )] + fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + } + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = _vpminq_f16(a, b); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -13591,8 +15408,32 @@ pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe { _vpminnm_f16(a, b) } } #[doc = "Floating-point add pairwise"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmp.v4f16" + )] + fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float16x4_t = _vpminnm_f16(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -13607,25 +15448,72 @@ pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } unsafe { _vpminnmq_f16(a, b) } } -#[doc = "Floating-point Minimum Number Pairwise (vector)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"] +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(fminnmp))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { +pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnmp.v2f32" + link_name = "llvm.aarch64.neon.fminnmp.v8f16" )] - fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; } - unsafe { _vpminnm_f32(a, b) } -} + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = _vpminnmq_f16(a, b); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmp.v2f32" + )] + fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { _vpminnm_f32(a, b) } +} +#[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmp.v2f32" + )] + fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float32x2_t = _vpminnm_f32(a, b); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} #[doc = "Floating-point Minimum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -13640,8 +15528,31 @@ pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe { _vpminnmq_f32(a, b) } } #[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmp.v4f32" + )] + fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float32x4_t = _vpminnmq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Floating-point Minimum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -13655,9 +15566,32 @@ pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } unsafe { _vpminnmq_f64(a, b) } } +#[doc = "Floating-point Minimum Number Pairwise (vector)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmp.v2f64" + )] + fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float64x2_t = _vpminnmq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} #[doc = "Floating-point minimum number pairwise"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -13672,8 +15606,29 @@ pub fn vpminnmqd_f64(a: float64x2_t) -> f64 { unsafe { _vpminnmqd_f64(a) } } #[doc = "Floating-point minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vpminnmqd_f64(a: float64x2_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64" + )] + fn _vpminnmqd_f64(a: float64x2_t) -> f64; + } + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + _vpminnmqd_f64(a) + } +} +#[doc = "Floating-point minimum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -13687,9 +15642,30 @@ pub fn vpminnms_f32(a: float32x2_t) -> f32 { } unsafe { _vpminnms_f32(a) } } +#[doc = "Floating-point minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(fminnmp))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vpminnms_f32(a: float32x2_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32" + )] + fn _vpminnms_f32(a: float32x2_t) -> f32; + } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + _vpminnms_f32(a) + } +} #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] @@ -13704,8 +15680,31 @@ pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe { _vpminq_f32(a, b) } } #[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v4f32" + )] + fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float32x4_t = _vpminq_f32(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Folding minimum of 
adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] @@ -13720,8 +15719,31 @@ pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { unsafe { _vpminq_f64(a, b) } } #[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v2f64" + )] + fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; + } + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float64x2_t = _vpminq_f64(a, b); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] @@ -13736,8 +15758,37 @@ pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unsafe { _vpminq_s8(a, b) } } #[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sminp.v16i8" + )] + fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = _vpminq_s8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] @@ -13752,76 +15803,197 @@ pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unsafe { _vpminq_s16(a, b) } } #[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] 
#[cfg_attr(test, assert_instr(sminp))] -pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { +pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v4i32" + link_name = "llvm.aarch64.neon.sminp.v8i16" )] - fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = _vpminq_s16(a, b); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vpminq_s32(a, b) } } #[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminp))] -pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { +#[cfg_attr(test, assert_instr(sminp))] +pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v16i8" + link_name = "llvm.aarch64.neon.sminp.v4i32" )] - fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; + fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - unsafe { _vpminq_u8(a, b) } + unsafe { _vpminq_s32(a, b) } } #[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(uminp))] -pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { +#[cfg_attr(test, assert_instr(sminp))] +pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v8i16" + link_name = "llvm.aarch64.neon.sminp.v4i32" )] - fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int32x4_t = _vpminq_s32(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vpminq_u16(a, b) } } #[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] -pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { +pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unsafe extern "unadjusted" { #[cfg_attr( 
any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v4i32" + link_name = "llvm.aarch64.neon.uminp.v16i8" )] - fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; } - unsafe { _vpminq_u32(a, b) } + unsafe { _vpminq_u8(a, b) } } -#[doc = "Floating-point minimum pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(fminp))] -pub fn vpminqd_f64(a: float64x2_t) -> f64 { +#[cfg_attr(test, assert_instr(uminp))] +pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v16i8" + )] + fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; + } + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = _vpminq_u8(a, b); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v8i16" + )] + fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + } + unsafe { _vpminq_u16(a, b) } +} +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v8i16" + )] + fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + } + unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = _vpminq_u16(a, b); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + 
any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v4i32" + )] + fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + } + unsafe { _vpminq_u32(a, b) } +} +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uminp.v4i32" + )] + fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + } + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vpminq_u32(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Floating-point minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn vpminqd_f64(a: float64x2_t) -> f64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -13832,8 +16004,29 @@ pub fn vpminqd_f64(a: float64x2_t) -> f64 { unsafe { _vpminqd_f64(a) } } #[doc = "Floating-point minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn vpminqd_f64(a: float64x2_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f64.v2f64" + )] + fn _vpminqd_f64(a: float64x2_t) -> f64; + } + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + _vpminqd_f64(a) + } +} +#[doc = "Floating-point minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] @@ -13847,6 +16040,26 @@ pub fn vpmins_f32(a: float32x2_t) -> f32 { } unsafe { _vpmins_f32(a) } } +#[doc = "Floating-point minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn vpmins_f32(a: float32x2_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminv.f32.v2f32" + )] + fn _vpmins_f32(a: float32x2_t) -> f32; + } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + _vpmins_f32(a) + } +} #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"] 
#[inline] @@ -14143,7 +16356,7 @@ pub fn vqdmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(sqdmlal, LANE = 0))] #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { @@ -14154,7 +16367,7 @@ pub fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(sqdmlal, LANE = 0))] #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { @@ -14165,7 +16378,7 @@ pub fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(sqdmlal, LANE = 0))] #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { @@ -14176,7 +16389,7 @@ pub fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(sqdmlal, LANE = 0))] #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { @@ -14187,7 +16400,7 @@ pub fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlal))] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(sqdmlal))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 { let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); @@ -14309,7 +16522,7 @@ pub fn vqdmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(sqdmlsl, LANE = 0))] #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { @@ -14320,7 +16533,7 @@ pub fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, 
assert_instr(sqdmlsl, LANE = 0))] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(sqdmlsl, LANE = 0))] #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 { @@ -14331,7 +16544,7 @@ pub fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(sqdmlsl, LANE = 0))] #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { @@ -14342,7 +16555,7 @@ pub fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(sqdmlsl, LANE = 0))] #[rustc_legacy_const_generics(3)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 { @@ -14353,7 +16566,7 @@ pub fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sqdmlsl))] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(sqdmlsl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 { let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c)); @@ -16334,6 +18547,7 @@ fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16341,8 +18555,25 @@ pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t { vqtbl1(a, b) } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t { + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = vqtbl1(a, b); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16350,8 +18581,30 @@ pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { vqtbl1q(a, b) } #[doc = "Table look-up"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = vqtbl1q(a, b); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16359,8 +18612,25 @@ pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t { unsafe { transmute(vqtbl1(transmute(a), b)) } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t { + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbl1(transmute(a), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16368,8 +18638,30 @@ pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unsafe { transmute(vqtbl1q(transmute(a), b)) } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbl1q(transmute(a), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16377,8 +18669,25 @@ pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t { unsafe { transmute(vqtbl1(transmute(a), b)) } } #[doc = "Table look-up"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t { + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbl1(transmute(a), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16386,6 +18695,27 @@ pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t { unsafe { transmute(vqtbl1q(transmute(a), b)) } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t { + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbl1q(transmute(a), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"] #[inline] #[target_feature(enable = "neon")] @@ -16420,6 +18750,7 @@ fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16427,17 +18758,74 @@ pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t { vqtbl2(a.0, a.1, b) } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { - vqtbl2q(a.0, a.1, b) +pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t { + let mut a: int8x16x2_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = vqtbl2(a.0, a.1, b); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = 
"Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { + vqtbl2q(a.0, a.1, b) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { + let mut a: int8x16x2_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = vqtbl2q(a.0, a.1, b); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16445,8 +18833,34 @@ pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x16x2_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16454,8 +18868,39 @@ pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { + let mut a: uint8x16x2_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] 
+ ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16463,8 +18908,34 @@ pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x16x2_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16472,6 +18943,36 @@ pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { + let mut a: poly8x16x2_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"] #[inline] #[target_feature(enable = "neon")] @@ -16506,6 +19007,7 @@ fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"] #[inline] +#[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16513,8 +19015,39 @@ pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t { vqtbl3(a.0, a.1, a.2, b) } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t { + let mut a: int8x16x3_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = vqtbl3(a.0, a.1, a.2, b); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16522,8 +19055,44 @@ pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { vqtbl3q(a.0, a.1, a.2, b) } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { + let mut a: int8x16x3_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = vqtbl3q(a.0, a.1, a.2, b); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16531,8 +19100,40 @@ pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x16x3_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = 
simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = + transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16540,8 +19141,45 @@ pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { + let mut a: uint8x16x3_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = + transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16549,8 +19187,40 @@ pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x16x3_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = + transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] #[inline] +#[cfg(target_endian = "little")] 
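// Illustrative aside, not code from this patch: every #[cfg(target_endian = "big")]
// body added in these hunks follows one pattern. On big-endian targets the in-memory
// lane order is the mirror of the little-endian layout that the underlying LLVM
// intrinsics assume, so each wrapper reverses the lanes of every vector argument with
// simd_shuffle!, calls the same inner function the little-endian path uses, and then
// reverses the lanes of the result. A minimal sketch of the idea, with a hypothetical
// 4-lane vector x_be and a hypothetical little-endian-only builtin:
//
//     let x_le = simd_shuffle!(x_be, x_be, [3, 2, 1, 0]); // mirror lanes
//     let r_le = le_only_builtin(x_le);                   // sees LE lane order
//     let r_be = simd_shuffle!(r_le, r_le, [3, 2, 1, 0]); // mirror back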
#[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16558,6 +19228,42 @@ pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { + let mut a: poly8x16x3_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = + transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"] #[inline] #[target_feature(enable = "neon")] @@ -16604,6 +19310,7 @@ fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16611,23 +19318,100 @@ pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t { vqtbl4(a.0, a.1, a.2, a.3, b) } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { - vqtbl4q(a.0, a.1, a.2, a.3, b) +pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t { + let mut a: int8x16x4_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = vqtbl4(a.0, a.1, a.2, a.3, b); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"] #[inline] +#[cfg(target_endian = "little")] 
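// Illustrative aside, not code from this patch: the _u8 and _p8 variants in this
// family funnel into the signed inner functions via transmute. That is sound here
// because int8x16_t, uint8x16_t and poly8x16_t are all plain 16-byte vectors with
// identical layout; transmute changes only the element type, never the bits. A
// hedged sketch of the round trip:
//
//     let u: uint8x8_t = vdup_n_u8(0xAB);
//     let s: int8x8_t = unsafe { transmute(u) };     // same bits, -85 in each lane
//     let back: uint8x8_t = unsafe { transmute(s) }; // bit-identical to u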
#[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { - unsafe { - transmute(vqtbl4( +pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { + vqtbl4q(a.0, a.1, a.2, a.3, b) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { + let mut a: int8x16x4_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = vqtbl4q(a.0, a.1, a.2, a.3, b); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { + unsafe { + transmute(vqtbl4( transmute(a.0), transmute(a.1), transmute(a.2), @@ -16637,8 +19421,50 @@ pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { + let mut a: uint8x16x4_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16654,8 +19480,55 @@ pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { } } #[doc = "Table look-up"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { + let mut a: uint8x16x4_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbl4q( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16671,8 +19544,50 @@ pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { } } #[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { + let mut a: poly8x16x4_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16687,6 +19602,52 @@ pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { )) } } +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { + let mut a: poly8x16x4_t = a; + unsafe { + a.0 = simd_shuffle!( + a.0, + a.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.1 = simd_shuffle!( + 
a.1, + a.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.2 = simd_shuffle!( + a.2, + a.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + a.3 = simd_shuffle!( + a.3, + a.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbl4q( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + b, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"] #[inline] @@ -16722,6 +19683,7 @@ fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16729,8 +19691,26 @@ pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { vqtbx1(a, b, c) } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = vqtbx1(a, b, c); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16738,8 +19718,32 @@ pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { vqtbx1q(a, b, c) } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = vqtbx1q(a, b, c); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"] #[inline] +#[cfg(target_endian = "little")] 
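// Illustrative aside, not from this patch: vqtbl* and vqtbx* differ only in what
// happens to out-of-range indices. TBL (vqtbl*) writes zero to any lane whose index
// is past the end of the table; TBX (vqtbx*) keeps the matching lane from the extra
// accumulator argument `a` instead. A hedged sketch for a single 16-byte table
// (values invented for the example):
//
//     // table = [10, 11, .., 25], idx = [0, 15, 16, 255]
//     // tbl result:                       [10, 25, 0, 0]
//     // tbx result with a = [1, 2, 3, 4]: [10, 25, 3, 4]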
#[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16747,8 +19751,26 @@ pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t { unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t { + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx1(transmute(a), transmute(b), c)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16756,8 +19778,32 @@ pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbx1q(transmute(a), transmute(b), c)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16765,8 +19811,26 @@ pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t { unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t { + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = 
simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx1(transmute(a), transmute(b), c)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16774,6 +19838,29 @@ pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t { unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t { + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbx1q(transmute(a), transmute(b), c)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"] #[inline] #[target_feature(enable = "neon")] @@ -16808,6 +19895,7 @@ fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16815,17 +19903,77 @@ pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t { vqtbx2(a, b.0, b.1, c) } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t { - vqtbx2q(a, b.0, b.1, c) -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] +pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t { + let mut b: int8x16x2_t = b; + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = vqtbx2(a, b.0, b.1, c); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 
2, 1, 0]) + } +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t { + vqtbx2q(a, b.0, b.1, c) +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t { + let mut b: int8x16x2_t = b; + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = vqtbx2q(a, b.0, b.1, c); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16833,8 +19981,35 @@ pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x16x2_t = b; + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16842,8 +20017,42 @@ pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] 
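// Illustrative aside, not from this patch: the big-endian bodies rebind the table
// tuple with `let mut b: ... = b;` purely so its fields b.0, b.1, .. can be
// overwritten in place with their lane-reversed forms. Function parameters are
// locals passed by value, so only the copy inside the wrapper is mutated; the
// caller's tuple never changes. The same shadowing trick in miniature:
//
//     fn demo(pair: (u8, u8)) -> u8 {
//         let mut pair = pair;             // rebind as a mutable local
//         pair.0 = pair.0.wrapping_add(1); // caller's tuple is untouched
//         pair.0
//     }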
+#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { + let mut b: uint8x16x2_t = b; + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = + transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16851,8 +20060,35 @@ pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x16x2_t = b; + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16860,6 +20096,39 @@ pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { + let mut b: poly8x16x2_t = b; + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = + transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"] #[inline] #[target_feature(enable = "neon")] @@ -16901,6 +20170,7 @@ fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16908,8 +20178,40 @@ pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t { vqtbx3(a, b.0, b.1, b.2, c) } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t { + let mut b: int8x16x3_t = b; + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = vqtbx3(a, b.0, b.1, b.2, c); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16917,8 +20219,46 @@ pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t { vqtbx3q(a, b.0, b.1, b.2, c) } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t { + let mut b: int8x16x3_t = b; + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = vqtbx3q(a, b.0, b.1, b.2, c); + simd_shuffle!( + 
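// Illustrative aside, not from this patch: counting the shuffles in this big-endian
// body shows the cost of the compatibility layer. A q-form three-table lookup has
// six simd_shuffle! calls in the source: one for `a`, three for b.0/b.1/b.2, one for
// `c`, and one for the result, all wrapped around a single tbx. LLVM may fold some
// of them, but the #[cfg(target_endian = "little")] twin avoids them entirely.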
ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16934,8 +20274,46 @@ pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t { } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x16x3_t = b; + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16951,8 +20329,52 @@ pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t { } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t { + let mut b: uint8x16x3_t = b; + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbx3q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16968,8 +20390,46 @@ pub fn 
vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t { } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x16x3_t = b; + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16985,12 +20445,55 @@ pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t { } } #[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -fn vqtbx4( +pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t { + let mut b: poly8x16x3_t = b; + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbx3q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + c, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +fn vqtbx4( a: int8x8_t, b: int8x16_t, c: int8x16_t, @@ -17047,6 +20550,7 @@ fn vqtbx4q( #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since 
= "1.59.0")] @@ -17054,8 +20558,45 @@ pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { vqtbx4(a, b.0, b.1, b.2, b.3, c) } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { + let mut b: int8x16x4_t = b; + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = vqtbx4(a, b.0, b.1, b.2, b.3, c); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17063,8 +20604,51 @@ pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t { vqtbx4q(a, b.0, b.1, b.2, b.3, c) } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t { + let mut b: int8x16x4_t = b; + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = vqtbx4q(a, b.0, b.1, b.2, b.3, c); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17081,8 +20665,52 @@ pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = 
"neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { + let mut b: uint8x16x4_t = b; + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = transmute(vqtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17099,8 +20727,58 @@ pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { + let mut b: uint8x16x4_t = b; + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = transmute(vqtbx4q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17117,8 +20795,52 @@ pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { } } #[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { + let mut b: poly8x16x4_t = b; + 
unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = transmute(vqtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17134,6 +20856,55 @@ pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { )) } } +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbx))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { + let mut b: poly8x16x4_t = b; + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!( + b.0, + b.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.1 = simd_shuffle!( + b.1, + b.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.2 = simd_shuffle!( + b.2, + b.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + b.3 = simd_shuffle!( + b.3, + b.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = transmute(vqtbx4q( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + )); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} #[doc = "Rotate and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"] #[inline] @@ -17418,7 +21189,6 @@ pub fn vrecpxh_f16(a: f16) -> f16 { #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -17427,125 +21197,64 @@ pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = 
"stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t { - unsafe { - let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t { +pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t { - unsafe { - let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t { +pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t { - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t { +pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t { - unsafe { - let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17700,7 +21409,6 @@ pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -17708,2935 +21416,2595 @@ pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] +#[doc = "[Arm's 
@@ -17708,2935 +21416,2595 @@ pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
-    unsafe {
-        let ret_val: float64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
+pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
-    unsafe {
-        let a: float32x2_t = simd_shuffle!(a, a, [1, 0]);
-        transmute(a)
-    }
+pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
+pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
-    unsafe {
-        let a: float32x2_t = simd_shuffle!(a, a, [1, 0]);
-        transmute(a)
-    }
+pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
+pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
-    unsafe {
-        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: float64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
+pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
-    unsafe {
-        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: poly64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
+pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
-    unsafe {
-        let ret_val: float32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
+pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
-    unsafe {
-        let ret_val: int8x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
+pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
-    unsafe {
-        let ret_val: int16x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
+pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
+pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
-    unsafe {
-        let ret_val: int32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
+pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
-    unsafe {
-        let ret_val: uint8x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
+pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
-    unsafe {
-        let ret_val: uint16x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
+pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
+pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
-    unsafe {
-        let ret_val: uint32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
+pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
-    unsafe {
-        let ret_val: poly8x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
+pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
-    unsafe {
-        let ret_val: poly16x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
+pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
+pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
-    unsafe {
-        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
-        transmute(a)
-    }
+pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
+pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
-    unsafe {
-        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
-        let ret_val: float32x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
-}
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
-#[inline]
-#[cfg(target_endian = "little")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
+pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
-    unsafe {
-        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
-        let ret_val: int8x16_t = transmute(a);
-        simd_shuffle!(
-            ret_val,
-            ret_val,
-            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
-        )
-    }
+pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
+pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
-    unsafe {
-        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
-        let ret_val: int16x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
+pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
-    unsafe {
-        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
-        let ret_val: int32x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
+pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
+pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
-    unsafe {
-        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
-        let ret_val: uint8x16_t = transmute(a);
-        simd_shuffle!(
-            ret_val,
-            ret_val,
-            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
-        )
-    }
+pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
+pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
-    unsafe {
-        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
-        let ret_val: uint16x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
+pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
     unsafe { transmute(a) }
 }
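// Illustrative sketch, not part of the patch: the q-form casts are likewise
// plain bit-level casts now, for example viewing the bytes of a float64x2_t
// as a uint8x16_t for byte-oriented processing; no instruction is emitted.
#[cfg(target_arch = "aarch64")]
fn f64x2_as_bytes(v: core::arch::aarch64::float64x2_t) -> core::arch::aarch64::uint8x16_t {
    core::arch::aarch64::vreinterpretq_u8_f64(v)
}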
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
+#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
-    unsafe {
-        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
-        let ret_val: uint32x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
+pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.frint32x.v2f32"
+        )]
+        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
     }
+    unsafe { _vrnd32x_f32(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
-#[inline]
-#[cfg(target_endian = "little")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
-    unsafe { transmute(a) }
-}
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
+#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
-    unsafe {
-        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
-        let ret_val: poly8x16_t = transmute(a);
-        simd_shuffle!(
-            ret_val,
-            ret_val,
-            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
-        )
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
+pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.frint32x.v4f32"
+        )]
+        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
     }
+    unsafe { _vrnd32xq_f32(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
+#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
-    unsafe { transmute(a) }
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
+pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.frint32x.v2f64"
+        )]
+        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
+    }
+    unsafe { _vrnd32xq_f64(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
+#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
-    unsafe {
-        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
-        let ret_val: poly16x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
+pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.frint32x.f64"
+        )]
+        fn _vrnd32x_f64(a: f64) -> f64;
     }
+    unsafe { transmute(_vrnd32x_f64(vget_lane_f64::<0>(a))) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
+#[doc = "Floating-point round to 32-bit integer toward zero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
-    unsafe { transmute(a) }
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
+pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.frint32z.v2f32"
+        )]
+        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
+    }
+    unsafe { _vrnd32z_f32(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
+#[doc = "Floating-point round to 32-bit integer toward zero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
-    unsafe {
-        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        transmute(a)
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
+pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.frint32z.v4f32"
+        )]
+        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
     }
+    unsafe { _vrnd32zq_f32(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
+#[doc = "Floating-point round to 32-bit integer toward zero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
-    unsafe { transmute(a) }
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
+pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.frint32z.v2f64"
+        )]
+        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
+    }
+    unsafe { _vrnd32zq_f64(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
+#[doc = "Floating-point round to 32-bit integer toward zero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
-    unsafe {
-        let a: int8x16_t =
-            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: float64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
+pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.frint32z.f64"
+        )]
+        fn _vrnd32z_f64(a: f64) -> f64;
    }
+    unsafe { transmute(_vrnd32z_f64(vget_lane_f64::<0>(a))) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
+#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
-    unsafe { transmute(a) }
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
+pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.frint64x.v2f32"
+        )]
+        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
+    }
+    unsafe { _vrnd64x_f32(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
+#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
-    unsafe {
-        let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        transmute(a)
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
+pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.frint64x.v4f32"
+        )]
+        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
     }
+    unsafe { _vrnd64xq_f32(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
+#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
-    unsafe { transmute(a) }
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
+pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.frint64x.v2f64"
+        )]
+        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
+    }
+    unsafe { _vrnd64xq_f64(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
+#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
-    unsafe {
-        let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: float64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
+pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.frint64x.f64"
+        )]
+        fn _vrnd64x_f64(a: f64) -> f64;
     }
+    unsafe { transmute(_vrnd64x_f64(vget_lane_f64::<0>(a))) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
+#[doc = "Floating-point round to 64-bit integer toward zero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
-    unsafe { transmute(a) }
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
+pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.frint64z.v2f32"
+        )]
+        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
+    }
+    unsafe { _vrnd64z_f32(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
+#[doc = "Floating-point round to 64-bit integer toward zero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
-    unsafe {
-        let a: int32x2_t = simd_shuffle!(a, a, [1, 0]);
-        transmute(a)
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
+pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.frint64z.v4f32"
+        )]
+        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
     }
+    unsafe { _vrnd64zq_f32(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
+#[doc = "Floating-point round to 64-bit integer toward zero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
-    unsafe { transmute(a) }
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
+pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.frint64z.v2f64"
+        )]
+        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
+    }
+    unsafe { _vrnd64zq_f64(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
+#[doc = "Floating-point round to 64-bit integer toward zero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
-    unsafe {
-        let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: float64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
+#[target_feature(enable = "neon,frintts")]
+#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
+pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.frint64z.f64"
+        )]
+        fn _vrnd64z_f64(a: f64) -> f64;
     }
+    unsafe { transmute(_vrnd64z_f64(vget_lane_f64::<0>(a))) }
 }
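// Illustrative sketch, not part of the patch; assumes a nightly toolchain with
// the unstable stdarch_neon_ftts feature and hardware with FEAT_FRINTTS. The
// frint32/frint64 intrinsics above round each lane to an integral value that
// is representable in the named integer width, e.g. vrnd32xq_f32 under the
// current rounding mode.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon,frintts")]
fn round_to_i32_representable(v: core::arch::aarch64::float32x4_t) -> core::arch::aarch64::float32x4_t {
    core::arch::aarch64::vrnd32xq_f32(v)
}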
float64x2_t { - unsafe { transmute(a) } +#[cfg_attr(test, assert_instr(frintz))] +pub fn vrnd_f32(a: float32x2_t) -> float32x2_t { + unsafe { simd_trunc(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { - unsafe { - let a: uint8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +#[cfg_attr(test, assert_instr(frintz))] +pub fn vrndq_f32(a: float32x4_t) -> float32x4_t { + unsafe { simd_trunc(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { - unsafe { transmute(a) } +#[cfg_attr(test, assert_instr(frintz))] +pub fn vrnd_f64(a: float64x1_t) -> float64x1_t { + unsafe { simd_trunc(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] +#[doc = "Floating-point round to integral, toward zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { - unsafe { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) - } +#[cfg_attr(test, assert_instr(frintz))] +pub fn vrndq_f64(a: float64x2_t) -> float64x2_t { + unsafe { simd_trunc(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { - unsafe { transmute(a) } +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(frinta))] +pub fn vrnda_f16(a: float16x4_t) -> float16x4_t { + unsafe { simd_round(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { - unsafe { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(frinta))] +pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t { + unsafe { simd_round(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { - unsafe { transmute(a) } +#[cfg_attr(test, assert_instr(frinta))] +pub fn vrnda_f32(a: float32x2_t) -> float32x2_t { + unsafe { simd_round(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { - unsafe { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) - } +#[cfg_attr(test, assert_instr(frinta))] +pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t { + unsafe { simd_round(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { - unsafe { transmute(a) } +#[cfg_attr(test, assert_instr(frinta))] +pub fn vrnda_f64(a: float64x1_t) -> float64x1_t { + unsafe { simd_round(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { - unsafe { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +#[cfg_attr(test, assert_instr(frinta))] +pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t { + unsafe { simd_round(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { - unsafe { transmute(a) } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(frinta))] +pub fn vrndah_f16(a: f16) -> f16 { + roundf16(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] +#[doc = "Floating-point round to integral, to nearest with ties to away"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { - unsafe { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) - } +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(frintz))] +pub fn vrndh_f16(a: f16) -> f16 { + truncf16(a) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] +#[doc = "Floating-point round to integral, using current rounding mode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { - unsafe { transmute(a) } +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(frinti))] +pub fn vrndi_f16(a: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.nearbyint.v4f16" + )] + fn _vrndi_f16(a: float16x4_t) -> float16x4_t; + } + unsafe { _vrndi_f16(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
+#[doc = "Floating-point round to integral, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
 #[inline]
-#[cfg(target_endian = "big")]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
-    unsafe {
-        let a: poly8x16_t =
-            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: float64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
+#[target_feature(enable = "neon,fp16")]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(frinti))]
+pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.nearbyint.v8f16"
+        )]
+        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
     }
+    unsafe { _vrndiq_f16(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
+#[doc = "Floating-point round to integral, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
-    unsafe { transmute(a) }
+#[cfg_attr(test, assert_instr(frinti))]
+pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.nearbyint.v2f32"
+        )]
+        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
+    }
+    unsafe { _vrndi_f32(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
+#[doc = "Floating-point round to integral, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
-    unsafe {
-        let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        transmute(a)
+#[cfg_attr(test, assert_instr(frinti))]
+pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.nearbyint.v4f32"
+        )]
+        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
     }
+    unsafe { _vrndiq_f32(a) }
 }
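[Reviewer note, not part of the patch] Unlike frinta, the frinti intrinsics lower to llvm.nearbyint and follow whatever rounding mode the FPCR currently selects. A sketch assuming the default round-to-nearest-even mode has not been changed at runtime; nearbyint_f32x4 is a hypothetical helper:

#[cfg(target_arch = "aarch64")]
fn nearbyint_f32x4(v: [f32; 4]) -> [f32; 4] {
    use core::arch::aarch64::{vld1q_f32, vrndiq_f32, vst1q_f32};
    let mut out = [0.0f32; 4];
    unsafe {
        // Under the default FPCR mode, ties go to even: 0.5 -> 0.0, 1.5 -> 2.0.
        vst1q_f32(out.as_mut_ptr(), vrndiq_f32(vld1q_f32(v.as_ptr())));
    }
    out
}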
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
+#[doc = "Floating-point round to integral, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
-    unsafe { transmute(a) }
-}
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
+#[cfg_attr(test, assert_instr(frinti))]
+pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.nearbyint.v1f64"
+        )]
+        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
+    }
+    unsafe { _vrndi_f64(a) }
+}
+#[doc = "Floating-point round to integral, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
-    unsafe {
-        let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: float64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
+#[cfg_attr(test, assert_instr(frinti))]
+pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.nearbyint.v2f64"
+        )]
+        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
     }
+    unsafe { _vrndiq_f64(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
+#[doc = "Floating-point round to integral, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
+#[inline]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(frinti))]
+pub fn vrndih_f16(a: f16) -> f16 {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.nearbyint.f16"
+        )]
+        fn _vrndih_f16(a: f16) -> f16;
+    }
+    unsafe { _vrndih_f16(a) }
+}
+#[doc = "Floating-point round to integral, toward minus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
+#[inline]
+#[target_feature(enable = "neon,fp16")]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
+    unsafe { simd_floor(a) }
+}
+#[doc = "Floating-point round to integral, toward minus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
+#[inline]
+#[target_feature(enable = "neon,fp16")]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
+    unsafe { simd_floor(a) }
+}
+#[doc = "Floating-point round to integral, toward minus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
-    unsafe { transmute(a) }
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
+    unsafe { simd_floor(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
+#[doc = "Floating-point round to integral, toward minus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
-    unsafe {
-        let ret_val: float32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
+    unsafe { simd_floor(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
+#[doc = "Floating-point round to integral, toward minus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
-    unsafe { transmute(a) }
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
+    unsafe { simd_floor(a) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
+#[doc = "Floating-point round to integral, toward minus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
-    unsafe {
-        let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]);
-        let ret_val: float32x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
+    unsafe { simd_floor(a) }
 }
-#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
+#[doc = "Floating-point round to integral, toward minus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
 #[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
-pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frint32x.v2f32"
-        )]
-        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
-    }
-    unsafe { _vrnd32x_f32(a) }
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn vrndmh_f16(a: f16) -> f16 {
+    floorf16(a)
 }
-#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
+#[doc = "Floating-point round to integral, to nearest with ties to even"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
 #[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
-pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(frintn))]
+pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frint32x.v4f32"
+            link_name = "llvm.roundeven.v1f64"
         )]
-        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
+        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
     }
-    unsafe { _vrnd32xq_f32(a) }
+    unsafe { _vrndn_f64(a) }
 }
-#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
+#[doc = "Floating-point round to integral, to nearest with ties to even"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
 #[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
-pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(frintn))]
+pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frint32x.v2f64"
+            link_name = "llvm.roundeven.v2f64"
         )]
-        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
+        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
     }
-    unsafe { _vrnd32xq_f64(a) }
+    unsafe { _vrndnq_f64(a) }
 }
-#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
+#[doc = "Floating-point round to integral, toward minus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
 #[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
-pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(frintn))]
+pub fn vrndnh_f16(a: f16) -> f16 {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.frint32x.f64"
+            link_name = "llvm.roundeven.f16"
         )]
-        fn _vrnd32x_f64(a: f64) -> f64;
+        fn _vrndnh_f16(a: f16) -> f16;
     }
-    unsafe { transmute(_vrnd32x_f64(vget_lane_f64::<0>(a))) }
+    unsafe { _vrndnh_f16(a) }
 }
-#[doc = "Floating-point round to 32-bit integer toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
+#[doc = "Floating-point round to integral, to nearest with ties to even"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
 #[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
-pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(frintn))]
+pub fn vrndns_f32(a: f32) -> f32 {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frint32z.v2f32"
+            link_name = "llvm.roundeven.f32"
         )]
-        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
+        fn _vrndns_f32(a: f32) -> f32;
     }
-    unsafe { _vrnd32z_f32(a) }
+    unsafe { _vrndns_f32(a) }
 }
-#[doc = "Floating-point round to 32-bit integer toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
+#[doc = "Floating-point round to integral, toward plus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
 #[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
-pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frint32z.v4f32"
-        )]
-        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
-    }
-    unsafe { _vrnd32zq_f32(a) }
+#[target_feature(enable = "neon,fp16")]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
+    unsafe { simd_ceil(a) }
 }
-#[doc = "Floating-point round to 32-bit integer toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
+#[doc = "Floating-point round to integral, toward plus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
 #[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
-pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frint32z.v2f64"
-        )]
-        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
-    }
-    unsafe { _vrnd32zq_f64(a) }
+#[target_feature(enable = "neon,fp16")]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
+    unsafe { simd_ceil(a) }
 }
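[Reviewer note, not part of the patch] vrndns_f32 now lowers to llvm.roundeven instead of an AArch64-specific intrinsic; the ties-to-even behaviour is unchanged. A minimal sketch (demo_roundeven is hypothetical):

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn demo_roundeven() {
    use core::arch::aarch64::vrndns_f32;
    assert_eq!(vrndns_f32(0.5), 0.0); // tie resolved to the even value 0
    assert_eq!(vrndns_f32(1.5), 2.0); // tie resolved to the even value 2
    assert_eq!(vrndns_f32(-2.5), -2.0);
}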
-#[doc = "Floating-point round to 32-bit integer toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
-#[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
-pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.frint32z.f64"
-        )]
-        fn _vrnd32z_f64(a: f64) -> f64;
-    }
-    unsafe { transmute(_vrnd32z_f64(vget_lane_f64::<0>(a))) }
-}
-#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
-#[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
-pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frint64x.v2f32"
-        )]
-        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
-    }
-    unsafe { _vrnd64x_f32(a) }
-}
-#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
-#[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
-pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frint64x.v4f32"
-        )]
-        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
-    }
-    unsafe { _vrnd64xq_f32(a) }
-}
-#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
-#[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
-pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frint64x.v2f64"
-        )]
-        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
-    }
-    unsafe { _vrnd64xq_f64(a) }
-}
-#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
+#[doc = "Floating-point round to integral, toward plus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
 #[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
-pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.frint64x.f64"
-        )]
-        fn _vrnd64x_f64(a: f64) -> f64;
-    }
-    unsafe { transmute(_vrnd64x_f64(vget_lane_f64::<0>(a))) }
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
+    unsafe { simd_ceil(a) }
 }
-#[doc = "Floating-point round to 64-bit integer toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
+#[doc = "Floating-point round to integral, toward plus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
 #[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
-pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frint64z.v2f32"
-        )]
-        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
-    }
-    unsafe { _vrnd64z_f32(a) }
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
+    unsafe { simd_ceil(a) }
 }
-#[doc = "Floating-point round to 64-bit integer toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
+#[doc = "Floating-point round to integral, toward plus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
 #[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
-pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frint64z.v4f32"
-        )]
-        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
-    }
-    unsafe { _vrnd64zq_f32(a) }
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
+    unsafe { simd_ceil(a) }
 }
-#[doc = "Floating-point round to 64-bit integer toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
+#[doc = "Floating-point round to integral, toward plus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
 #[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
-pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frint64z.v2f64"
-        )]
-        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
-    }
-    unsafe { _vrnd64zq_f64(a) }
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
+    unsafe { simd_ceil(a) }
 }
-#[doc = "Floating-point round to 64-bit integer toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
+#[doc = "Floating-point round to integral, toward plus infinity"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
 #[inline]
-#[target_feature(enable = "neon,frintts")]
-#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
-pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.frint64z.f64"
-        )]
-        fn _vrnd64z_f64(a: f64) -> f64;
-    }
-    unsafe { transmute(_vrnd64z_f64(vget_lane_f64::<0>(a))) }
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn vrndph_f16(a: f16) -> f16 {
+    ceilf16(a)
 }
-#[doc = "Floating-point round to integral, toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
+#[doc = "Floating-point round to integral exact, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
 #[inline]
 #[target_feature(enable = "neon,fp16")]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
 #[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(frintz))]
-pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
-    unsafe { simd_trunc(a) }
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
+    unsafe { simd_round_ties_even(a) }
 }
-#[doc = "Floating-point round to integral, toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
+#[doc = "Floating-point round to integral exact, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
 #[inline]
 #[target_feature(enable = "neon,fp16")]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
 #[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(frintz))]
-pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
-    unsafe { simd_trunc(a) }
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
+    unsafe { simd_round_ties_even(a) }
 }
-#[doc = "Floating-point round to integral, toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
+#[doc = "Floating-point round to integral exact, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frintz))]
-pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
-    unsafe { simd_trunc(a) }
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
+    unsafe { simd_round_ties_even(a) }
 }
-#[doc = "Floating-point round to integral, toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
+#[doc = "Floating-point round to integral exact, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frintz))]
-pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
-    unsafe { simd_trunc(a) }
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
+    unsafe { simd_round_ties_even(a) }
 }
-#[doc = "Floating-point round to integral, toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
+#[doc = "Floating-point round to integral exact, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frintz))]
-pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
-    unsafe { simd_trunc(a) }
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
+    unsafe { simd_round_ties_even(a) }
 }
-#[doc = "Floating-point round to integral, toward zero"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
+#[doc = "Floating-point round to integral exact, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frintz))]
-pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
-    unsafe { simd_trunc(a) }
-}
-#[doc = "Floating-point round to integral, to nearest with ties to away"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
-#[inline]
-#[target_feature(enable = "neon,fp16")]
-#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
-#[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(frinta))]
-pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
-    unsafe { simd_round(a) }
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
+    unsafe { simd_round_ties_even(a) }
 }
-#[doc = "Floating-point round to integral, to nearest with ties to away"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
+#[doc = "Floating-point round to integral, using current rounding mode"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
 #[inline]
 #[target_feature(enable = "neon,fp16")]
-#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(frinta))]
-pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
-    unsafe { simd_round(a) }
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn vrndxh_f16(a: f16) -> f16 {
+    round_ties_even_f16(a)
 }
-#[doc = "Floating-point round to integral, to nearest with ties to away"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
+#[doc = "Signed rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
 #[inline]
 #[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(srshl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frinta))]
-pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
-    unsafe { simd_round(a) }
+pub fn vrshld_s64(a: i64, b: i64) -> i64 {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.srshl.i64"
+        )]
+        fn _vrshld_s64(a: i64, b: i64) -> i64;
+    }
+    unsafe { _vrshld_s64(a, b) }
 }
-#[doc = "Floating-point round to integral, to nearest with ties to away"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
+#[doc = "Unsigned rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
 #[inline]
 #[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(urshl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frinta))]
-pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
-    unsafe { simd_round(a) }
+pub fn vrshld_u64(a: u64, b: i64) -> u64 {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.urshl.i64"
+        )]
+        fn _vrshld_u64(a: u64, b: i64) -> u64;
+    }
+    unsafe { _vrshld_u64(a, b) }
 }
-#[doc = "Floating-point round to integral, to nearest with ties to away"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
+#[doc = "Signed rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
 #[inline]
 #[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(srshr, N = 2))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frinta))]
-pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
-    unsafe { simd_round(a) }
+pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
+    static_assert!(N >= 1 && N <= 64);
+    vrshld_s64(a, -N as i64)
 }
-#[doc = "Floating-point round to integral, to nearest with ties to away"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
+#[doc = "Unsigned rounding shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
 #[inline]
 #[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(urshr, N = 2))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frinta))]
-pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
-    unsafe { simd_round(a) }
+pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
+    static_assert!(N >= 1 && N <= 64);
+    vrshld_u64(a, -N as i64)
 }
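[Reviewer note, not part of the patch] A rounding shift right adds 1 << (N - 1) before shifting, which is why it can be expressed as a rounding shift left by -N. A worked sketch (demo_rounding_shift is hypothetical):

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn demo_rounding_shift() {
    use core::arch::aarch64::vrshrd_n_s64;
    // (5 + 2) >> 2 == 1 and (6 + 2) >> 2 == 2; a plain 6 >> 2 would give 1.
    assert_eq!(vrshrd_n_s64::<2>(5), 1);
    assert_eq!(vrshrd_n_s64::<2>(6), 2);
}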
-#[doc = "Floating-point round to integral, to nearest with ties to away"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
+#[doc = "Rounding shift right narrow"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
 #[inline]
-#[target_feature(enable = "neon,fp16")]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(frinta))]
-pub fn vrndah_f16(a: f16) -> f16 {
-    roundf16(a)
+#[target_feature(enable = "neon")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(rshrn2, N = 2))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
+    static_assert!(N >= 1 && N <= 8);
+    vcombine_s8(a, vrshrn_n_s16::<N>(b))
 }
-#[doc = "Floating-point round to integral, to nearest with ties to away"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
+#[doc = "Rounding shift right narrow"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
 #[inline]
-#[target_feature(enable = "neon,fp16")]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(frintz))]
-pub fn vrndh_f16(a: f16) -> f16 {
-    truncf16(a)
+#[target_feature(enable = "neon")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(rshrn2, N = 2))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
+    static_assert!(N >= 1 && N <= 16);
+    vcombine_s16(a, vrshrn_n_s32::<N>(b))
 }
-#[doc = "Floating-point round to integral, using current rounding mode"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
+#[doc = "Rounding shift right narrow"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
 #[inline]
-#[target_feature(enable = "neon,fp16")]
-#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
-#[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(frinti))]
-pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
+#[target_feature(enable = "neon")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(rshrn2, N = 2))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
+    static_assert!(N >= 1 && N <= 32);
+    vcombine_s32(a, vrshrn_n_s64::<N>(b))
+}
+#[doc = "Rounding shift right narrow"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(rshrn2, N = 2))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
+    static_assert!(N >= 1 && N <= 8);
+    vcombine_u8(a, vrshrn_n_u16::<N>(b))
+}
+#[doc = "Rounding shift right narrow"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(rshrn2, N = 2))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
+    static_assert!(N >= 1 && N <= 16);
+    vcombine_u16(a, vrshrn_n_u32::<N>(b))
+}
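[Reviewer note, not part of the patch] The high_n variants narrow the wide operand b and pack the result into the upper half of the output, keeping a as the lower half. A sketch (demo_narrow_high is hypothetical):

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn demo_narrow_high() {
    use core::arch::aarch64::{vdup_n_s8, vdupq_n_s16, vgetq_lane_s8, vrshrn_high_n_s16};
    let r = vrshrn_high_n_s16::<2>(vdup_n_s8(1), vdupq_n_s16(6));
    assert_eq!(vgetq_lane_s8::<0>(r), 1); // lower half comes from `a`
    assert_eq!(vgetq_lane_s8::<8>(r), 2); // upper half: round(6 / 4)
}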
"1.59.0")] +pub fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + vcombine_u32(a, vrshrn_n_u64::(b)) +} +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frsqrte))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v4f16" + link_name = "llvm.aarch64.neon.frsqrte.v1f64" )] - fn _vrndi_f16(a: float16x4_t) -> float16x4_t; + fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t; } - unsafe { _vrndi_f16(a) } + unsafe { _vrsqrte_f64(a) } } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"] #[inline] -#[target_feature(enable = "neon,fp16")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(frinti))] -pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t { +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frsqrte))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v8f16" + link_name = "llvm.aarch64.neon.frsqrte.v2f64" )] - fn _vrndiq_f16(a: float16x8_t) -> float16x8_t; + fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t; } - unsafe { _vrndiq_f16(a) } + unsafe { _vrsqrteq_f64(a) } } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub fn vrndi_f32(a: float32x2_t) -> float32x2_t { +pub fn vrsqrted_f64(a: f64) -> f64 { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.nearbyint.v2f32" + link_name = "llvm.aarch64.neon.frsqrte.f64" )] - fn _vrndi_f32(a: float32x2_t) -> float32x2_t; + fn _vrsqrted_f64(a: f64) -> f64; } - unsafe { _vrndi_f32(a) } + unsafe { _vrsqrted_f64(a) } } -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frinti))] -pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t { +pub fn vrsqrtes_f32(a: f32) -> 
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.nearbyint.v4f32"
+            link_name = "llvm.aarch64.neon.frsqrte.f32"
         )]
-        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
+        fn _vrsqrtes_f32(a: f32) -> f32;
     }
-    unsafe { _vrndiq_f32(a) }
+    unsafe { _vrsqrtes_f32(a) }
 }
-#[doc = "Floating-point round to integral, using current rounding mode"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
+#[doc = "Reciprocal square-root estimate."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frinti))]
-pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
+#[cfg_attr(test, assert_instr(frsqrte))]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vrsqrteh_f16(a: f16) -> f16 {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.nearbyint.v1f64"
+            link_name = "llvm.aarch64.neon.frsqrte.f16"
         )]
-        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
+        fn _vrsqrteh_f16(a: f16) -> f16;
     }
-    unsafe { _vrndi_f64(a) }
+    unsafe { _vrsqrteh_f16(a) }
 }
-#[doc = "Floating-point round to integral, using current rounding mode"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
+#[doc = "Floating-point reciprocal square root step"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
 #[inline]
 #[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(frsqrts))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frinti))]
-pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
+pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.nearbyint.v2f64"
+            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
         )]
-        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
+        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
     }
-    unsafe { _vrndiq_f64(a) }
+    unsafe { _vrsqrts_f64(a, b) }
 }
-#[doc = "Floating-point round to integral, using current rounding mode"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
+#[doc = "Floating-point reciprocal square root step"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
 #[inline]
-#[target_feature(enable = "neon,fp16")]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(frinti))]
-pub fn vrndih_f16(a: f16) -> f16 {
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(frsqrts))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.nearbyint.f16"
+            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
         )]
-        fn _vrndih_f16(a: f16) -> f16;
+        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
     }
-    unsafe { _vrndih_f16(a) }
+    unsafe { _vrsqrtsq_f64(a, b) }
 }
-#[doc = "Floating-point round to integral, toward minus infinity"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
-#[inline]
-#[target_feature(enable = "neon,fp16")]
-#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
-#[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(frintm))]
-pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
-    unsafe { simd_floor(a) }
-}
-#[doc = "Floating-point round to integral, toward minus infinity"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
-#[inline]
-#[target_feature(enable = "neon,fp16")]
-#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
-#[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(frintm))]
-pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
-    unsafe { simd_floor(a) }
-}
-#[doc = "Floating-point round to integral, toward minus infinity"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frintm))]
-pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
-    unsafe { simd_floor(a) }
-}
-#[doc = "Floating-point round to integral, toward minus infinity"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frintm))]
-pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
-    unsafe { simd_floor(a) }
-}
-#[doc = "Floating-point round to integral, toward minus infinity"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frintm))]
-pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
-    unsafe { simd_floor(a) }
-}
-#[doc = "Floating-point round to integral, toward minus infinity"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frintm))]
-pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
-    unsafe { simd_floor(a) }
-}
-#[doc = "Floating-point round to integral, toward minus infinity"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
-#[inline]
-#[target_feature(enable = "neon,fp16")]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(frintm))]
-pub fn vrndmh_f16(a: f16) -> f16 {
-    floorf16(a)
-}
-#[doc = "Floating-point round to integral, to nearest with ties to even"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
+#[doc = "Floating-point reciprocal square root step"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
 #[inline]
 #[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(frsqrts))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frintn))]
-pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
+pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.roundeven.v1f64"
+            link_name = "llvm.aarch64.neon.frsqrts.f64"
         )]
-        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
+        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
     }
-    unsafe { _vrndn_f64(a) }
+    unsafe { _vrsqrtsd_f64(a, b) }
 }
-#[doc = "Floating-point round to integral, to nearest with ties to even"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
+#[doc = "Floating-point reciprocal square root step"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
 #[inline]
 #[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(frsqrts))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(frintn))]
-pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
+pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.roundeven.v2f64"
+            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
-        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
+        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
     }
-    unsafe { _vrndnq_f64(a) }
+    unsafe { _vrsqrtss_f32(a, b) }
 }
-#[doc = "Floating-point round to integral, toward minus infinity"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
+#[doc = "Floating-point reciprocal square root step"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
 #[inline]
 #[target_feature(enable = "neon,fp16")]
+#[cfg_attr(test, assert_instr(frsqrts))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(frintn))]
-pub fn vrndnh_f16(a: f16) -> f16 {
+pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.roundeven.f16"
+            link_name = "llvm.aarch64.neon.frsqrts.f16"
         )]
-        fn _vrndnh_f16(a: f16) -> f16;
+        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
     }
-    unsafe { _vrndnh_f16(a) }
+    unsafe { _vrsqrtsh_f16(a, b) }
 }
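[Reviewer note, not part of the patch] frsqrte and frsqrts are meant to be combined: the estimate gives only a few bits of precision, and each step computes (3 - a*b)/2, the Newton-Raphson correction factor for 1/sqrt. A sketch under those assumptions (rsqrt_refined is hypothetical):

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn rsqrt_refined(x: f32) -> f32 {
    use core::arch::aarch64::{vrsqrtes_f32, vrsqrtss_f32};
    let mut e = vrsqrtes_f32(x); // rough estimate of 1/sqrt(x)
    e *= vrsqrtss_f32(x * e, e); // one Newton-Raphson step
    e *= vrsqrtss_f32(x * e, e); // a second step for near-f32 accuracy
    e
}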
"arm64ec"))] -#[cfg_attr(test, assert_instr(frintp))] -pub fn vrndp_f16(a: float16x4_t) -> float16x4_t { - unsafe { simd_ceil(a) } -} -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"] -#[inline] -#[target_feature(enable = "neon,fp16")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(frintp))] -pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t { - unsafe { simd_ceil(a) } +pub fn vrsrad_n_s64(a: i64, b: i64) -> i64 { + static_assert!(N >= 1 && N <= 64); + let b: i64 = vrshrd_n_s64::(b); + a.wrapping_add(b) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"] +#[doc = "Unsigned rounding shift right and accumulate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(urshr, N = 2))] +#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub fn vrndp_f32(a: float32x2_t) -> float32x2_t { - unsafe { simd_ceil(a) } +pub fn vrsrad_n_u64(a: u64, b: u64) -> u64 { + static_assert!(N >= 1 && N <= 64); + let b: u64 = vrshrd_n_u64::(b); + a.wrapping_add(b) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t { - unsafe { simd_ceil(a) } +pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + vcombine_s8(a, vrsubhn_s16(b, c)) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub fn vrndp_f64(a: float64x1_t) -> float64x1_t { - unsafe { simd_ceil(a) } +pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + vcombine_s16(a, vrsubhn_s32(b, c)) } -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintp))] -pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t { - unsafe { simd_ceil(a) } -} -#[doc = "Floating-point round to integral, toward plus infinity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"] -#[inline] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(frintp))] -pub fn vrndph_f16(a: f16) -> f16 { - ceilf16(a) -} -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"] -#[inline] -#[target_feature(enable = "neon,fp16")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(frintx))] -pub fn vrndx_f16(a: float16x4_t) -> float16x4_t { - unsafe { simd_round_ties_even(a) } -} -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"] -#[inline] -#[target_feature(enable = "neon,fp16")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(frintx))] -pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t { - unsafe { simd_round_ties_even(a) } +pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + vcombine_s32(a, vrsubhn_s64(b, c)) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub fn vrndx_f32(a: float32x2_t) -> float32x2_t { - unsafe { simd_round_ties_even(a) } +pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + vcombine_u8(a, vrsubhn_u16(b, c)) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t { - unsafe { simd_round_ties_even(a) } +pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + vcombine_u16(a, vrsubhn_u32(b, c)) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] +#[cfg_attr(test, assert_instr(rsubhn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub fn vrndx_f64(a: float64x1_t) -> float64x1_t { - unsafe { simd_round_ties_even(a) } +pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + vcombine_u32(a, vrsubhn_u64(b, c)) } -#[doc = "Floating-point round to integral exact, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] +#[cfg_attr(test, assert_instr(rsubhn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(frintx))] -pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t { - unsafe { simd_round_ties_even(a) } -} -#[doc = "Floating-point round to integral, using current rounding mode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"] -#[inline] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(frintx))] -pub fn vrndxh_f16(a: f16) -> f16 { - round_ties_even_f16(a) -} -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(srshl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrshld_s64(a: i64, b: i64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.i64" - )] - fn _vrshld_s64(a: i64, b: i64) -> i64; - } - unsafe { _vrshld_s64(a, b) } -} -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(urshl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrshld_u64(a: u64, b: i64) -> u64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.i64" - )] - fn _vrshld_u64(a: u64, b: i64) -> u64; - } - unsafe { _vrshld_u64(a, b) } -} -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(srshr, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrshrd_n_s64(a: i64) -> i64 { - static_assert!(N >= 1 && N <= 64); - vrshld_s64(a, -N as i64) -} -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(urshr, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = 
"neon_intrinsics", since = "1.59.0")] -pub fn vrshrd_n_u64(a: u64) -> u64 { - static_assert!(N >= 1 && N <= 64); - vrshld_u64(a, -N as i64) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_endian = "little"), assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - vcombine_s8(a, vrshrn_n_s16::(b)) +pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + vcombine_s8(a, vrsubhn_s16(b, c)) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_endian = "little"), assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_endian = "big")] +#[cfg_attr(test, assert_instr(rsubhn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - vcombine_s16(a, vrshrn_n_s32::(b)) +pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + vcombine_s16(a, vrsubhn_s32(b, c)) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_endian = "little"), assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_endian = "big")] +#[cfg_attr(test, assert_instr(rsubhn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - vcombine_s32(a, vrshrn_n_s64::(b)) +pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + vcombine_s32(a, vrsubhn_s64(b, c)) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_endian = "little"), assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_endian = "big")] +#[cfg_attr(test, assert_instr(rsubhn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - vcombine_u8(a, vrshrn_n_u16::(b)) +pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + vcombine_u8(a, vrsubhn_u16(b, c)) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_endian = "little"), assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_endian = "big")] +#[cfg_attr(test, assert_instr(rsubhn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - vcombine_u16(a, vrshrn_n_u32::(b)) +pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + vcombine_u16(a, vrsubhn_u32(b, c)) } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(all(test, target_endian = "little"), assert_instr(rshrn2, N = 2))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_endian = "big")] +#[cfg_attr(test, assert_instr(rsubhn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - vcombine_u32(a, vrshrn_n_u64::(b)) +pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + vcombine_u32(a, vrsubhn_u64(b, c)) } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"] +#[doc = "Multi-vector floating-point adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { +#[unstable(feature = "stdarch_neon_fp8", issue = "none")] +#[target_feature(enable = "neon,fp8")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))] +pub fn vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v1f64" + link_name = "llvm.aarch64.neon.fp8.fscale.v4f16" )] - fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t; + fn _vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t; } - unsafe { _vrsqrte_f64(a) } + unsafe { _vscale_f16(vn, vm) } } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"] +#[doc = "Multi-vector floating-point adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { +#[unstable(feature = "stdarch_neon_fp8", issue = "none")] +#[target_feature(enable = "neon,fp8")] +#[cfg_attr(all(test, not(target_env = "msvc")), 
assert_instr(fscale))] +pub fn vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v2f64" + link_name = "llvm.aarch64.neon.fp8.fscale.v8f16" )] - fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t; + fn _vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t; } - unsafe { _vrsqrteq_f64(a) } + unsafe { _vscaleq_f16(vn, vm) } } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"] +#[doc = "Multi-vector floating-point adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrsqrted_f64(a: f64) -> f64 { +#[unstable(feature = "stdarch_neon_fp8", issue = "none")] +#[target_feature(enable = "neon,fp8")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))] +pub fn vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.f64" + link_name = "llvm.aarch64.neon.fp8.fscale.v2f32" )] - fn _vrsqrted_f64(a: f64) -> f64; + fn _vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t; } - unsafe { _vrsqrted_f64(a) } + unsafe { _vscale_f32(vn, vm) } } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"] +#[doc = "Multi-vector floating-point adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(frsqrte))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrsqrtes_f32(a: f32) -> f32 { +#[unstable(feature = "stdarch_neon_fp8", issue = "none")] +#[target_feature(enable = "neon,fp8")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))] +pub fn vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.f32" + link_name = "llvm.aarch64.neon.fp8.fscale.v4f32" )] - fn _vrsqrtes_f32(a: f32) -> f32; + fn _vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t; } - unsafe { _vrsqrtes_f32(a) } + unsafe { _vscaleq_f32(vn, vm) } } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"] +#[doc = "Multi-vector floating-point adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f64)"] #[inline] -#[cfg_attr(test, assert_instr(frsqrte))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrsqrteh_f16(a: f16) -> f16 { +#[unstable(feature = "stdarch_neon_fp8", issue = "none")] +#[target_feature(enable = "neon,fp8")] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))] +pub fn vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t { unsafe extern "unadjusted" { 
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frsqrte.f16"
+            link_name = "llvm.aarch64.neon.fp8.fscale.v2f64"
         )]
-        fn _vrsqrteh_f16(a: f16) -> f16;
+        fn _vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t;
     }
-    unsafe { _vrsqrteh_f16(a) }
+    unsafe { _vscaleq_f64(vn, vm) }
 }
-#[doc = "Floating-point reciprocal square root step"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
+#[doc = "Insert vector element from another vector element"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(frsqrts))]
+#[cfg_attr(test, assert_instr(nop, LANE = 0))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
-        )]
-        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
-    }
-    unsafe { _vrsqrts_f64(a, b) }
+pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
+    static_assert!(LANE == 0);
+    unsafe { simd_insert!(b, LANE as u32, a) }
 }
-#[doc = "Floating-point reciprocal square root step"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
-#[inline]
+#[doc = "Insert vector element from another vector element"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
+#[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(frsqrts))]
+#[cfg_attr(test, assert_instr(nop, LANE = 0))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
+pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
+    static_assert_uimm_bits!(LANE, 1);
+    unsafe { simd_insert!(b, LANE as u32, a) }
+}
+#[doc = "Insert vector element from another vector element"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(nop, LANE = 0))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
+    static_assert_uimm_bits!(LANE, 1);
+    unsafe {
+        let b: float64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: float64x2_t = simd_insert!(b, LANE as u32, a);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
+}
+#[doc = "SHA512 hash update part 2"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
+#[inline]
+#[cfg(target_endian = "little")]
+#[target_feature(enable = "neon,sha3")]
+#[cfg_attr(test, assert_instr(sha512h2))]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
+pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
+            link_name = "llvm.aarch64.crypto.sha512h2"
         )]
-        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
+        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
     }
-    unsafe { _vrsqrtsq_f64(a, b) }
+    unsafe { _vsha512h2q_u64(a, b, c) }
 }
-#[doc = "Floating-point reciprocal square root step"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
+#[doc = "SHA512 hash update part 2"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(frsqrts))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,sha3")]
+#[cfg_attr(test, assert_instr(sha512h2))]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
+pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frsqrts.f64"
+            link_name = "llvm.aarch64.crypto.sha512h2"
         )]
-        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
+        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
+    }
+    unsafe {
+        let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let c: uint64x2_t = simd_shuffle!(c, c, [1, 0]);
+        let ret_val: uint64x2_t = _vsha512h2q_u64(a, b, c);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
     }
-    unsafe { _vrsqrtsd_f64(a, b) }
 }
-#[doc = "Floating-point reciprocal square root step"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
+#[doc = "SHA512 hash update part 1"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(frsqrts))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
+#[cfg(target_endian = "little")]
+#[target_feature(enable = "neon,sha3")]
+#[cfg_attr(test, assert_instr(sha512h))]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
+pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frsqrts.f32"
+            link_name = "llvm.aarch64.crypto.sha512h"
         )]
-        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
+        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
     }
-    unsafe { _vrsqrtss_f32(a, b) }
+    unsafe { _vsha512hq_u64(a, b, c) }
 }
-#[doc = "Floating-point reciprocal square root step"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
+#[doc = "SHA512 hash update part 1"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
 #[inline]
-#[target_feature(enable = "neon,fp16")]
-#[cfg_attr(test, assert_instr(frsqrts))]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,sha3")]
+#[cfg_attr(test, assert_instr(sha512h))]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
+pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frsqrts.f16"
+            link_name = "llvm.aarch64.crypto.sha512h"
         )]
-        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
+        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
+    }
+    unsafe {
+        let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let c: uint64x2_t = simd_shuffle!(c, c, [1, 0]);
+        let ret_val: uint64x2_t = _vsha512hq_u64(a, b, c);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
     }
-    unsafe { _vrsqrtsh_f16(a, b) }
 }
-#[doc = "Signed rounding shift right and accumulate."]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
+#[doc = "SHA512 schedule update 0"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(srshr, N = 2))]
-#[rustc_legacy_const_generics(2)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
-    static_assert!(N >= 1 && N <= 64);
-    let b: i64 = vrshrd_n_s64::<N>(b);
-    a.wrapping_add(b)
+#[cfg(target_endian = "little")]
+#[target_feature(enable = "neon,sha3")]
+#[cfg_attr(test, assert_instr(sha512su0))]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
+pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.crypto.sha512su0"
+        )]
+        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
+    }
+    unsafe { _vsha512su0q_u64(a, b) }
 }
-#[doc = "Unsigned rounding shift right and accumulate."]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
+#[doc = "SHA512 schedule update 0"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(urshr, N = 2))]
-#[rustc_legacy_const_generics(2)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
-    static_assert!(N >= 1 && N <= 64);
-    let b: u64 = vrshrd_n_u64::<N>(b);
-    a.wrapping_add(b)
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,sha3")]
+#[cfg_attr(test, assert_instr(sha512su0))]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
+pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.crypto.sha512su0"
+        )]
+        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
+    }
+    unsafe {
+        let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: uint64x2_t = _vsha512su0q_u64(a, b);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
 }
-#[doc = "Rounding subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
+#[doc = "SHA512 schedule update 1"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
 #[inline]
-#[target_feature(enable = "neon")]
 #[cfg(target_endian = "little")]
-#[cfg_attr(test, assert_instr(rsubhn2))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
-    vcombine_s8(a, vrsubhn_s16(b, c))
+#[target_feature(enable = "neon,sha3")]
+#[cfg_attr(test, assert_instr(sha512su1))]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
+pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.crypto.sha512su1"
+        )]
+        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
+    }
+    unsafe { _vsha512su1q_u64(a, b, c) }
 }
-#[doc = "Rounding subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
+#[doc = "SHA512 schedule update 1"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(target_endian = "little")]
-#[cfg_attr(test, assert_instr(rsubhn2))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
-    vcombine_s16(a, vrsubhn_s32(b, c))
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,sha3")]
+#[cfg_attr(test, assert_instr(sha512su1))]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
+pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.crypto.sha512su1"
+        )]
+        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
+    }
+    unsafe {
+        let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let c: uint64x2_t = simd_shuffle!(c, c, [1, 0]);
+        let ret_val: uint64x2_t = _vsha512su1q_u64(a, b, c);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
 }
-#[doc = "Rounding subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
+#[doc = "Signed Shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(target_endian = "little")]
-#[cfg_attr(test, assert_instr(rsubhn2))]
+#[cfg_attr(test, assert_instr(sshl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
-    vcombine_s32(a, vrsubhn_s64(b, c))
+pub fn vshld_s64(a: i64, b: i64) -> i64 {
+    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
 }
-#[doc = "Rounding subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
+#[doc = "Unsigned Shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(target_endian = "little")]
-#[cfg_attr(test, assert_instr(rsubhn2))]
+#[cfg_attr(test, assert_instr(ushl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
-    vcombine_u8(a, vrsubhn_u16(b, c))
+pub fn vshld_u64(a: u64, b: i64) -> u64 {
+    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
 }
-#[doc = "Rounding subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
+#[doc = "Signed shift left long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(target_endian = "little")]
-#[cfg_attr(test, assert_instr(rsubhn2))]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(sshll2, N = 2))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
-    vcombine_u16(a, vrsubhn_u32(b, c))
+pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
+    static_assert!(N >= 0 && N <= 8);
+    let b = vget_high_s8(a);
+    vshll_n_s8::<N>(b)
 }
-#[doc = "Rounding subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
+#[doc = "Signed shift left long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(target_endian = "little")]
-#[cfg_attr(test, assert_instr(rsubhn2))]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(sshll2, N = 2))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
-    vcombine_u32(a, vrsubhn_u64(b, c))
+pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
+    static_assert!(N >= 0 && N <= 16);
+    let b = vget_high_s16(a);
+    vshll_n_s16::<N>(b)
 }
-#[doc = "Rounding subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
+#[doc = "Signed shift left long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(target_endian = "big")]
-#[cfg_attr(test, assert_instr(rsubhn))]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(sshll2, N = 2))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
-    vcombine_s8(a, vrsubhn_s16(b, c))
+pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
+    static_assert!(N >= 0 && N <= 32);
+    let b = vget_high_s32(a);
+    vshll_n_s32::<N>(b)
 }
-#[doc = "Rounding subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
+#[doc = "Signed shift left long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(target_endian = "big")]
-#[cfg_attr(test, assert_instr(rsubhn))]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(ushll2, N = 2))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
-    vcombine_s16(a, vrsubhn_s32(b, c))
+pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
+    static_assert!(N >= 0 && N <= 8);
+    let b: uint8x8_t = vget_high_u8(a);
+    vshll_n_u8::<N>(b)
 }
-#[doc = "Rounding subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
+#[doc = "Signed shift left long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(target_endian = "big")]
-#[cfg_attr(test, assert_instr(rsubhn))]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(ushll2, N = 2))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
-    vcombine_s32(a, vrsubhn_s64(b, c))
+pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
+    static_assert!(N >= 0 && N <= 16);
+    let b: uint16x4_t = vget_high_u16(a);
+    vshll_n_u16::<N>(b)
 }
-#[doc = "Rounding subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
+#[doc = "Signed shift left long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(target_endian = "big")]
-#[cfg_attr(test, assert_instr(rsubhn))]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(ushll2, N = 2))]
+#[rustc_legacy_const_generics(1)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
-    vcombine_u8(a, vrsubhn_u16(b, c))
+pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
+    static_assert!(N >= 0 && N <= 32);
+    let b: uint32x2_t = vget_high_u32(a);
+    vshll_n_u32::<N>(b)
 }
-#[doc = "Rounding subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
+#[doc = "Shift right narrow"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(target_endian = "big")]
-#[cfg_attr(test, assert_instr(rsubhn))]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(shrn2, N = 2))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
-    vcombine_u16(a, vrsubhn_u32(b, c))
+pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
+    static_assert!(N >= 1 && N <= 8);
+    vcombine_s8(a, vshrn_n_s16::<N>(b))
 }
-#[doc = "Rounding subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
+#[doc = "Shift right narrow"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(target_endian = "big")]
-#[cfg_attr(test, assert_instr(rsubhn))]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(shrn2, N = 2))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
-    vcombine_u32(a, vrsubhn_u64(b, c))
+pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
+    static_assert!(N >= 1 && N <= 16);
+    vcombine_s16(a, vshrn_n_s32::<N>(b))
 }
-#[doc = "Multi-vector floating-point adjust exponent"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f16)"]
+#[doc = "Shift right narrow"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
 #[inline]
-#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
-#[target_feature(enable = "neon,fp8")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
-pub fn vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.fp8.fscale.v4f16"
-        )]
-        fn _vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t;
-    }
-    unsafe { _vscale_f16(vn, vm) }
+#[target_feature(enable = "neon")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(shrn2, N = 2))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
+    static_assert!(N >= 1 && N <= 32);
+    vcombine_s32(a, vshrn_n_s64::<N>(b))
 }
-#[doc = "Multi-vector floating-point adjust exponent"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f16)"]
+#[doc = "Shift right narrow"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
 #[inline]
-#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
-#[target_feature(enable = "neon,fp8")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
-pub fn vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t {
+#[target_feature(enable = "neon")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(shrn2, N = 2))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
+    static_assert!(N >= 1 && N <= 8);
+    vcombine_u8(a, vshrn_n_u16::<N>(b))
+}
+#[doc = "Shift right narrow"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(shrn2, N = 2))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
+    static_assert!(N >= 1 && N <= 16);
+    vcombine_u16(a, vshrn_n_u32::<N>(b))
+}
+#[doc = "Shift right narrow"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(shrn2, N = 2))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
+    static_assert!(N >= 1 && N <= 32);
+    vcombine_u32(a, vshrn_n_u64::<N>(b))
+}
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+    static_assert_uimm_bits!(N, 3);
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.fp8.fscale.v8f16"
+            link_name = "llvm.aarch64.neon.vsli.v8i8"
         )]
-        fn _vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t;
+        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
     }
-    unsafe { _vscaleq_f16(vn, vm) }
+    unsafe { _vsli_n_s8(a, b, N) }
 }
 #[doc = "Multi-vector floating-point adjust exponent"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f32)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
 #[inline]
-#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
-#[target_feature(enable = "neon,fp8")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
-pub fn vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t {
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    static_assert_uimm_bits!(N, 3);
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.fp8.fscale.v2f32"
+            link_name = "llvm.aarch64.neon.vsli.v16i8"
         )]
-        fn _vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t;
+        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
     }
-    unsafe { _vscale_f32(vn, vm) }
+    unsafe { _vsliq_n_s8(a, b, N) }
 }
 #[doc = "Multi-vector floating-point adjust exponent"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f32)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
 #[inline]
-#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
-#[target_feature(enable = "neon,fp8")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
-pub fn vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t {
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+    static_assert_uimm_bits!(N, 4);
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.fp8.fscale.v4f32"
+            link_name = "llvm.aarch64.neon.vsli.v4i16"
         )]
-        fn _vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t;
+        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
     }
-    unsafe { _vscaleq_f32(vn, vm) }
+    unsafe { _vsli_n_s16(a, b, N) }
 }
 #[doc = "Multi-vector floating-point adjust exponent"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f64)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
 #[inline]
-#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
-#[target_feature(enable = "neon,fp8")]
-#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
-pub fn vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t {
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+    static_assert_uimm_bits!(N, 4);
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.fp8.fscale.v2f64"
+            link_name = "llvm.aarch64.neon.vsli.v8i16"
         )]
-        fn _vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t;
+        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
     }
-    unsafe { _vscaleq_f64(vn, vm) }
+    unsafe { _vsliq_n_s16(a, b, N) }
 }
-#[doc = "Insert vector element from another vector element"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(nop, LANE = 0))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
-    static_assert!(LANE == 0);
-    unsafe { simd_insert!(b, LANE as u32, a) }
+pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
+    static_assert!(N >= 0 && N <= 31);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.vsli.v2i32"
+        )]
+        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
+    }
+    unsafe { _vsli_n_s32(a, b, N) }
 }
-#[doc = "Insert vector element from another vector element"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(nop, LANE = 0))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
-    static_assert_uimm_bits!(LANE, 1);
-    unsafe { simd_insert!(b, LANE as u32, a) }
-}
-#[doc = "SHA512 hash update part 2"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
-#[inline]
-#[target_feature(enable = "neon,sha3")]
-#[cfg_attr(test, assert_instr(sha512h2))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
-pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
+pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
+    static_assert!(N >= 0 && N <= 31);
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.crypto.sha512h2"
+            link_name = "llvm.aarch64.neon.vsli.v4i32"
         )]
-        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
+        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
     }
-    unsafe { _vsha512h2q_u64(a, b, c) }
+    unsafe { _vsliq_n_s32(a, b, N) }
 }
-#[doc = "SHA512 hash update part 1"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
-#[inline]
-#[target_feature(enable = "neon,sha3")]
-#[cfg_attr(test, assert_instr(sha512h))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
-pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.crypto.sha512h"
-        )]
-        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
-    }
-    unsafe { _vsha512hq_u64(a, b, c) }
-}
-#[doc = "SHA512 schedule update 0"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
 #[inline]
-#[target_feature(enable = "neon,sha3")]
-#[cfg_attr(test, assert_instr(sha512su0))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
-pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
+    static_assert!(N >= 0 && N <= 63);
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.crypto.sha512su0"
+            link_name = "llvm.aarch64.neon.vsli.v1i64"
         )]
-        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
+        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
     }
-    unsafe { _vsha512su0q_u64(a, b) }
+    unsafe { _vsli_n_s64(a, b, N) }
 }
-#[doc = "SHA512 schedule update 1"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
 #[inline]
-#[target_feature(enable = "neon,sha3")]
-#[cfg_attr(test, assert_instr(sha512su1))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
-pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
+    static_assert!(N >= 0 && N <= 63);
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.crypto.sha512su1"
+            link_name = "llvm.aarch64.neon.vsli.v2i64"
         )]
-        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
+        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
     }
-    unsafe { _vsha512su1q_u64(a, b, c) }
+    unsafe { _vsliq_n_s64(a, b, N) }
 }
-#[doc = "Signed Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sshl))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshld_s64(a: i64, b: i64) -> i64 {
-    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
+pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Unsigned Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(ushl))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshld_u64(a: u64, b: i64) -> u64 {
-    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
+pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Signed shift left long"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(sshll2, N = 2))]
-#[rustc_legacy_const_generics(1)]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
-    static_assert!(N >= 0 && N <= 8);
-    let b = vget_high_s8(a);
-    vshll_n_s8::<N>(b)
+pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
+    static_assert_uimm_bits!(N, 4);
+    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Signed shift left long"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(sshll2, N = 2))]
-#[rustc_legacy_const_generics(1)]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
-    static_assert!(N >= 0 && N <= 16);
-    let b = vget_high_s16(a);
-    vshll_n_s16::<N>(b)
+pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
+    static_assert_uimm_bits!(N, 4);
+    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Signed shift left long"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(sshll2, N = 2))]
-#[rustc_legacy_const_generics(1)]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
-    static_assert!(N >= 0 && N <= 32);
-    let b = vget_high_s32(a);
-    vshll_n_s32::<N>(b)
+pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
+    static_assert!(N >= 0 && N <= 31);
+    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Signed shift left long"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(ushll2, N = 2))]
-#[rustc_legacy_const_generics(1)]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
-    static_assert!(N >= 0 && N <= 8);
-    let b: uint8x8_t = vget_high_u8(a);
-    vshll_n_u8::<N>(b)
+pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
+    static_assert!(N >= 0 && N <= 31);
+    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Signed shift left long"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(ushll2, N = 2))]
-#[rustc_legacy_const_generics(1)]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
-    static_assert!(N >= 0 && N <= 16);
-    let b: uint16x4_t = vget_high_u16(a);
-    vshll_n_u16::<N>(b)
+pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
+    static_assert!(N >= 0 && N <= 63);
+    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Signed shift left long"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(ushll2, N = 2))]
-#[rustc_legacy_const_generics(1)]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
-    static_assert!(N >= 0 && N <= 32);
-    let b: uint32x2_t = vget_high_u32(a);
-    vshll_n_u32::<N>(b)
+pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    static_assert!(N >= 0 && N <= 63);
+    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Shift right narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(shrn2, N = 2))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
-    static_assert!(N >= 1 && N <= 8);
-    vcombine_s8(a, vshrn_n_s16::<N>(b))
+pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Shift right narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(shrn2, N = 2))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
-    static_assert!(N >= 1 && N <= 16);
-    vcombine_s16(a, vshrn_n_s32::<N>(b))
+pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Shift right narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(shrn2, N = 2))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
-    static_assert!(N >= 1 && N <= 32);
-    vcombine_s32(a, vshrn_n_s64::<N>(b))
+pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
+    static_assert_uimm_bits!(N, 4);
+    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Shift right narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(shrn2, N = 2))]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
-    static_assert!(N >= 1 && N <= 8);
-    vcombine_u8(a, vshrn_n_u16::<N>(b))
+pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
+    static_assert_uimm_bits!(N, 4);
+    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Shift right narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(shrn2, N = 2))]
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
-    static_assert!(N >= 1 && N <= 16);
-    vcombine_u16(a, vshrn_n_u32::<N>(b))
+pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
+    static_assert!(N >= 0 && N <= 63);
+    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Shift right narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
+#[doc = "Shift Left and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(shrn2, N = 2))]
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(test, assert_instr(sli, N = 1))]
 #[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
-    static_assert!(N >= 1 && N <= 32);
-    vcombine_u32(a, vshrn_n_u64::<N>(b))
+pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
+    static_assert!(N >= 0 && N <= 63);
+    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
+#[doc = "Shift left and insert"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, N = 1))]
-#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
+pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
+    static_assert!(N >= 0 && N <= 63);
+    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
+}
+#[doc = "Shift left and insert"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
+pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
+    static_assert!(N >= 0 && N <= 63);
+    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
+}
+#[doc = "SM3PARTW1"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
+#[inline]
+#[cfg(target_endian = "little")]
+#[target_feature(enable = "neon,sm4")]
+#[cfg_attr(test, assert_instr(sm3partw1))]
+#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
+pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.vsli.v8i8"
+            link_name = "llvm.aarch64.crypto.sm3partw1"
         )]
-        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
+        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
     }
-    unsafe { _vsli_n_s8(a, b, N) }
+    unsafe { _vsm3partw1q_u32(a, b, c) }
 }
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
+#[doc = "SM3PARTW1"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, N = 1))]
-#[rustc_legacy_const_generics(2)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
-    static_assert_uimm_bits!(N, 3);
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,sm4")]
+#[cfg_attr(test, assert_instr(sm3partw1))]
+#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
+pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.vsli.v16i8"
+            link_name = "llvm.aarch64.crypto.sm3partw1"
         )]
-        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
+        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
+    }
+    unsafe {
+        let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let c: uint32x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]);
+        let ret_val: uint32x4_t = _vsm3partw1q_u32(a, b, c);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
     }
-    unsafe { _vsliq_n_s8(a, b, N) }
 }
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
+#[doc = "SM3PARTW2"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, N = 1))]
-#[rustc_legacy_const_generics(2)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
-    static_assert_uimm_bits!(N, 4);
+#[cfg(target_endian = "little")]
+#[target_feature(enable = "neon,sm4")]
+#[cfg_attr(test, assert_instr(sm3partw2))]
+#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
+pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.vsli.v4i16"
+            link_name = "llvm.aarch64.crypto.sm3partw2"
         )]
-        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
+        fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
     }
-    unsafe { _vsli_n_s16(a, b, N) }
+    unsafe { _vsm3partw2q_u32(a, b, c) }
 }
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
+#[doc = "SM3PARTW2"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(sli, N = 1))]
-#[rustc_legacy_const_generics(2)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
-    static_assert_uimm_bits!(N, 4);
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,sm4")]
+#[cfg_attr(test, assert_instr(sm3partw2))]
+#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
+pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v8i16" + link_name = "llvm.aarch64.crypto.sm3partw2" )] - fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t; + fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; + } + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let c: uint32x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsm3partw2q_u32(a, b, c); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vsliq_n_s16(a, b, N) } } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] +#[doc = "SM3SS1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 0 && N <= 31); +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3ss1))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v2i32" + link_name = "llvm.aarch64.crypto.sm3ss1" )] - fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t; + fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; } - unsafe { _vsli_n_s32(a, b, N) } + unsafe { _vsm3ss1q_u32(a, b, c) } } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] +#[doc = "SM3SS1"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 31); +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3ss1))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v4i32" + link_name = "llvm.aarch64.crypto.sm3ss1" )] - fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t; + fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; + } + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let c: uint32x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsm3ss1q_u32(a, b, c); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vsliq_n_s32(a, b, N) } } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] +#[doc = "SM3TT1A"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 0 && N <= 63); +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub fn vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v1i64" + link_name = "llvm.aarch64.crypto.sm3tt1a" )] - fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t; + fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; } - unsafe { _vsli_n_s64(a, b, N) } + unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) } } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] +#[doc = "SM3TT1A"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 63); +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub fn vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.vsli.v2i64" + link_name = "llvm.aarch64.crypto.sm3tt1a" )] - fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t; + fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; + } + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let c: uint32x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsm3tt1aq_u32(a, b, c, IMM2 as i64); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vsliq_n_s64(a, b, N) } } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] +#[doc = "SM3TT1B"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { transmute(vsli_n_s8::(transmute(a), transmute(b))) } +#[cfg(target_endian 
= "little")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub fn vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt1b" + )] + fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; + } + unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) } } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] +#[doc = "SM3TT1B"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - unsafe { transmute(vsliq_n_s8::(transmute(a), transmute(b))) } +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub fn vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(IMM2, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm3tt1b" + )] + fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; + } + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let c: uint32x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsm3tt1bq_u32(a, b, c, IMM2 as i64); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - unsafe { transmute(vsli_n_s16::(transmute(a), transmute(b))) } -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - unsafe { transmute(vsliq_n_s16::(transmute(a), transmute(b))) } -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsli_n_u32(a: uint32x2_t, b: 
uint32x2_t) -> uint32x2_t { - static_assert!(N >= 0 && N <= 31); - unsafe { transmute(vsli_n_s32::(transmute(a), transmute(b))) } -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 31); - unsafe { transmute(vsliq_n_s32::(transmute(a), transmute(b))) } -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 0 && N <= 63); - unsafe { transmute(vsli_n_s64::(transmute(a), transmute(b))) } -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 0 && N <= 63); - unsafe { transmute(vsliq_n_s64::(transmute(a), transmute(b))) } -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { transmute(vsli_n_s8::(transmute(a), transmute(b))) } -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 3); - unsafe { transmute(vsliq_n_s8::(transmute(a), transmute(b))) } -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 4); - unsafe { transmute(vsli_n_s16::(transmute(a), transmute(b))) } -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = 
"1.59.0")] -pub fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 4); - unsafe { transmute(vsliq_n_s16::(transmute(a), transmute(b))) } -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { - static_assert!(N >= 0 && N <= 63); - unsafe { transmute(vsli_n_s64::(transmute(a), transmute(b))) } -} -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(test, assert_instr(sli, N = 1))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - static_assert!(N >= 0 && N <= 63); - unsafe { transmute(vsliq_n_s64::(transmute(a), transmute(b))) } -} -#[doc = "Shift left and insert"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))] -pub fn vslid_n_s64(a: i64, b: i64) -> i64 { - static_assert!(N >= 0 && N <= 63); - unsafe { transmute(vsli_n_s64::(transmute(a), transmute(b))) } -} -#[doc = "Shift left and insert"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))] -pub fn vslid_n_u64(a: u64, b: u64) -> u64 { - static_assert!(N >= 0 && N <= 63); - unsafe { transmute(vsli_n_u64::(transmute(a), transmute(b))) } -} -#[doc = "SM3PARTW1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"] -#[inline] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3partw1))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3partw1" - )] - fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; - } - unsafe { _vsm3partw1q_u32(a, b, c) } -} -#[doc = "SM3PARTW2"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"] -#[inline] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3partw2))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3partw2" - )] - fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; - } - unsafe { 
_vsm3partw2q_u32(a, b, c) } -} -#[doc = "SM3SS1"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] -#[inline] -#[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3ss1))] -#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3ss1" - )] - fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; - } - unsafe { _vsm3ss1q_u32(a, b, c) } -} -#[doc = "SM3TT1A"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"] +#[doc = "SM3TT2A"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))] +#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub fn vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { +pub fn vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt1a" + link_name = "llvm.aarch64.crypto.sm3tt2a" )] - fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; + fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; } - unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) } + unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) } } -#[doc = "SM3TT1B"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"] +#[doc = "SM3TT2A"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))] +#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub fn vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { +pub fn vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt1b" + link_name = "llvm.aarch64.crypto.sm3tt2a" )] - fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; + fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; + } + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let c: uint32x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsm3tt2aq_u32(a, b, c, IMM2 as i64); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) } } -#[doc = "SM3TT2A"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"] +#[doc = "SM3TT2B"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,sm4")] -#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))] +#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))] #[rustc_legacy_const_generics(3)] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] -pub fn vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { +pub fn vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(IMM2, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sm3tt2a" + link_name = "llvm.aarch64.crypto.sm3tt2b" )] - fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; + fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; } - unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) } + unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) } } #[doc = "SM3TT2B"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))] #[rustc_legacy_const_generics(3)] @@ -20650,11 +24018,18 @@ pub fn vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_ )] fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t; } - unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) } + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let c: uint32x4_t = simd_shuffle!(c, c, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsm3tt2bq_u32(a, b, c, IMM2 as i64); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "SM4 key"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm4ekey))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] @@ -20668,9 +24043,32 @@ pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } unsafe { _vsm4ekeyq_u32(a, b) } } -#[doc = "SM4 encode"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] +#[doc = "SM4 key"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm4ekey))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm4ekey" + )] + fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + } + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsm4ekeyq_u32(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "SM4 encode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] +#[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, 
assert_instr(sm4e))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] @@ -20684,6 +24082,28 @@ pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } unsafe { _vsm4eq_u32(a, b) } } +#[doc = "SM4 encode"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,sm4")] +#[cfg_attr(test, assert_instr(sm4e))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] +pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sm4e" + )] + fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + } + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsm4eq_u32(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"] #[inline] @@ -22526,2432 +25946,5222 @@ pub unsafe fn vstl1q_lane_s64(ptr: *mut i64, val: int64x2_t) { let lane: i64 = vgetq_lane_s64::(val); (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release) } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"] +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sub))] +pub fn vsubd_s64(a: i64, b: i64) -> i64 { + a.wrapping_sub(b) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(sub))] +pub fn vsubd_u64(a: u64, b: u64) -> u64 { + a.wrapping_sub(b) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"] +#[inline] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(fsub))] +pub fn vsubh_f16(a: f16, b: f16) -> f16 { + a - b +} +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"] +#[inline] +#[target_feature(enable = 
"neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(ssubl2))] +pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { + unsafe { + let c: int16x8_t = simd_cast(vget_high_s8(a)); + let d: int16x8_t = simd_cast(vget_high_s8(b)); + simd_sub(c, d) + } +} +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(ssubl2))] +pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { + unsafe { + let c: int32x4_t = simd_cast(vget_high_s16(a)); + let d: int32x4_t = simd_cast(vget_high_s16(b)); + simd_sub(c, d) + } +} +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(ssubl2))] +pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { + unsafe { + let c: int64x2_t = simd_cast(vget_high_s32(a)); + let d: int64x2_t = simd_cast(vget_high_s32(b)); + simd_sub(c, d) + } +} +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(usubl2))] +pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + unsafe { + let c: uint16x8_t = simd_cast(vget_high_u8(a)); + let d: uint16x8_t = simd_cast(vget_high_u8(b)); + simd_sub(c, d) + } +} +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(usubl2))] +pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + unsafe { + let c: uint32x4_t = simd_cast(vget_high_u16(a)); + let d: uint32x4_t = simd_cast(vget_high_u16(b)); + simd_sub(c, d) + } +} +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(usubl2))] +pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + unsafe { + let c: uint64x2_t = simd_cast(vget_high_u32(a)); + let d: uint64x2_t = simd_cast(vget_high_u32(b)); + simd_sub(c, d) + } +} +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(all(test, target_endian = "little"), assert_instr(ssubw2))] +pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { + let c = vget_high_s8(b); + unsafe { simd_sub(a, simd_cast(c)) } +} +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's 
+#[doc = "Signed Subtract Wide"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(ssubw2))]
+pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
+    let c = vget_high_s16(b);
+    unsafe { simd_sub(a, simd_cast(c)) }
+}
+#[doc = "Signed Subtract Wide"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(ssubw2))]
+pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
+    let c = vget_high_s32(b);
+    unsafe { simd_sub(a, simd_cast(c)) }
+}
+#[doc = "Unsigned Subtract Wide"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(usubw2))]
+pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
+    let c = vget_high_u8(b);
+    unsafe { simd_sub(a, simd_cast(c)) }
+}
+#[doc = "Unsigned Subtract Wide"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(usubw2))]
+pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
+    let c = vget_high_u16(b);
+    unsafe { simd_sub(a, simd_cast(c)) }
+}
+#[doc = "Unsigned Subtract Wide"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(all(test, target_endian = "little"), assert_instr(usubw2))]
+pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
+    let c = vget_high_u32(b);
+    unsafe { simd_sub(a, simd_cast(c)) }
+}
+#[doc = "Table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbl))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+    vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
+        {
+            transmute(b)
+        }
+    })
+}
+#[doc = "Table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbl))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
+    vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
+}
+#[doc = "Table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbl))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
+    vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
+}
+#[doc = "Table
look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { + vqtbl1_s8(vcombine_s8(a.0, a.1), vreinterpret_u8_s8(b)) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + vqtbl1_u8(vcombine_u8(a.0, a.1), b) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + vqtbl1_p8(vcombine_p8(a.0, a.1), b) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { + let x = int8x16x2_t( + vcombine_s8(a.0, a.1), + vcombine_s8(a.2, unsafe { crate::mem::zeroed() }), + ); + vqtbl2_s8(x, vreinterpret_u8_s8(b)) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + let x = uint8x16x2_t( + vcombine_u8(a.0, a.1), + vcombine_u8(a.2, unsafe { crate::mem::zeroed() }), + ); + vqtbl2_u8(x, b) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + let x = poly8x16x2_t( + vcombine_p8(a.0, a.1), + vcombine_p8(a.2, unsafe { crate::mem::zeroed() }), + ); + vqtbl2_p8(x, b) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { + let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3)); + vqtbl2_s8(x, vreinterpret_u8_s8(b)) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(tbl))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)); + vqtbl2_u8(x, b) +} +#[doc = "Table look-up"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbl))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
+    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
+    vqtbl2_p8(x, b)
+}
+#[doc = "Extended table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbx))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
+    unsafe {
+        simd_select(
+            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
+            vqtbx1_s8(
+                a,
+                vcombine_s8(b, crate::mem::zeroed()),
+                vreinterpret_u8_s8(c),
+            ),
+            a,
+        )
+    }
+}
+#[doc = "Extended table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbx))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
+    unsafe {
+        simd_select(
+            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(8))),
+            vqtbx1_u8(a, vcombine_u8(b, crate::mem::zeroed()), c),
+            a,
+        )
+    }
+}
+#[doc = "Extended table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbx))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
+    unsafe {
+        simd_select(
+            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(8))),
+            vqtbx1_p8(a, vcombine_p8(b, crate::mem::zeroed()), c),
+            a,
+        )
+    }
+}
+#[doc = "Extended table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbx))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
+    unsafe {
+        simd_select(
+            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(16))),
+            vqtbx1_s8(a, vcombine_s8(b.0, b.1), vreinterpret_u8_s8(c)),
+            a,
+        )
+    }
+}
+#[doc = "Extended table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbx))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
+    unsafe {
+        simd_select(
+            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(16))),
+            vqtbx1_u8(a, vcombine_u8(b.0, b.1), c),
+            a,
+        )
+    }
+}
+#[doc = "Extended table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbx))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
+    unsafe {
+        simd_select(
+            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(16))),
+            vqtbx1_p8(a, vcombine_p8(b.0, b.1), c),
+            a,
+        )
+    }
+}
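The `vtbx*` emulation above is the same trick throughout: run the wider `vqtbx*` lookup, then use `simd_select` with a `c < table_len` mask so out-of-range indices fall back to the lanes of `a` (TBX semantics) rather than producing zero (TBL semantics). A minimal scalar sketch of that select/fallback step (`tbx_lane` is a hypothetical helper, for illustration only):

```rust
/// Scalar model of one vtbx lane: in-range indices look up the
/// table, out-of-range indices keep the corresponding lane of `a`.
fn tbx_lane(a: u8, table: &[u8], idx: u8) -> u8 {
    match table.get(idx as usize) {
        Some(&v) => v, // idx < table.len(): ordinary table lookup
        None => a,     // idx out of range: keep destination lane (TBX)
    }
}

fn main() {
    let table = [10u8, 11, 12, 13, 14, 15, 16, 17];
    assert_eq!(tbx_lane(0xEE, &table, 3), 13);   // in range
    assert_eq!(tbx_lane(0xEE, &table, 9), 0xEE); // falls back to `a`
}
```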
+#[doc = "Extended table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbx))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
+    let x = int8x16x2_t(
+        vcombine_s8(b.0, b.1),
+        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
+    );
+    unsafe {
+        simd_select(
+            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(24))),
+            vqtbx2_s8(a, x, vreinterpret_u8_s8(c)),
+            a,
+        )
+    }
+}
+#[doc = "Extended table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbx))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
+    let x = uint8x16x2_t(
+        vcombine_u8(b.0, b.1),
+        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
+    );
+    unsafe {
+        simd_select(
+            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(24))),
+            vqtbx2_u8(a, x, c),
+            a,
+        )
+    }
+}
+#[doc = "Extended table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbx))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
+    let x = poly8x16x2_t(
+        vcombine_p8(b.0, b.1),
+        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
+    );
+    unsafe {
+        simd_select(
+            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(24))),
+            vqtbx2_p8(a, x, c),
+            a,
+        )
+    }
+}
+#[doc = "Extended table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbx))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
+    let x = int8x16x2_t(vcombine_s8(b.0, b.1), vcombine_s8(b.2, b.3));
+    unsafe {
+        simd_select(
+            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(32))),
+            vqtbx2_s8(a, x, vreinterpret_u8_s8(c)),
+            a,
+        )
+    }
+}
+#[doc = "Extended table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbx))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
+    let x = uint8x16x2_t(vcombine_u8(b.0, b.1), vcombine_u8(b.2, b.3));
+    unsafe {
+        simd_select(
+            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(32))),
+            vqtbx2_u8(a, x, c),
+            a,
+        )
+    }
+}
+#[doc = "Extended table look-up"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tbx))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
+    let x = poly8x16x2_t(vcombine_p8(b.0, b.1), vcombine_p8(b.2, b.3));
+    unsafe {
+        simd_select(
+            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(32))),
+            vqtbx2_p8(a, x, c),
+            a,
+        )
+    }
+}
+#[doc = "Transpose vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
+#[inline]
+#[cfg(target_endian = "little")]
+#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float32x2_t = 
simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} 
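Every big-endian `vtrn1*` body above is the little-endian shuffle wrapped in lane reversals: reverse each input, apply the same index pattern, reverse the result. That keeps the intrinsic's observable lane numbering identical on both endiannesses. For the 2-lane cases the pattern `[0, 2]` degenerates to "even lane of each input", which is why those assertions expect `zip1` rather than `trn1`. A scalar sketch of the 4-lane interleave and its big-endian wrapper (hypothetical helpers for illustration):

```rust
/// Scalar model of trn1 on 4 lanes: pattern [0, 4, 2, 6], i.e. the
/// even-indexed lanes of `a` interleaved with those of `b`.
fn trn1_4(a: [u16; 4], b: [u16; 4]) -> [u16; 4] {
    [a[0], b[0], a[2], b[2]]
}

/// Big-endian wrapper in the style of the generated code: reverse
/// the inputs, run the little-endian shuffle, reverse the result.
fn trn1_4_be(a: [u16; 4], b: [u16; 4]) -> [u16; 4] {
    let rev = |v: [u16; 4]| [v[3], v[2], v[1], v[0]];
    rev(trn1_4(rev(a), rev(b)))
}

fn main() {
    let a = [0u16, 1, 2, 3];
    let b = [10u16, 11, 12, 13];
    assert_eq!(trn1_4(a, b), [0, 10, 2, 12]);
    // with both reversals applied, the same index pattern acts on the
    // mirrored lane numbering, so it selects the odd array indices
    assert_eq!(trn1_4_be(a, b), [11, 1, 13, 3]);
}
```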
+#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: poly64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { + simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), 
+ assert_instr(trn1) +)] +pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) } +} +#[doc = "Transpose vectors"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { 
simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { + simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: poly16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn1) +)] +pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> 
float16x8_t { + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2_s32(a: int32x2_t, 
b: int32x2_t) -> int32x2_t { + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, 
[1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: poly64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: poly64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + 
let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { + simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = 
"neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since 
= "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { + simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: poly16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } +} +#[doc = "Transpose vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(trn2) +)] +pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmtst))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe { + let c: int64x1_t = simd_and(a, b); + let d: i64x1 = i64x1::new(0); + simd_ne(c, transmute(d)) + } +} +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(cmtst))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe { + let c: int64x2_t = simd_and(a, b); + let d: i64x2 = i64x2::new(0, 0); + simd_ne(c, transmute(d)) + } +} +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(cmtst))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
+    unsafe {
+        let c: poly64x1_t = simd_and(a, b);
+        let d: i64x1 = i64x1::new(0);
+        simd_ne(c, transmute(d))
+    }
+}
+#[doc = "Signed compare bitwise Test bits nonzero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(cmtst))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
+    unsafe {
+        let c: poly64x2_t = simd_and(a, b);
+        let d: i64x2 = i64x2::new(0, 0);
+        simd_ne(c, transmute(d))
+    }
+}
+#[doc = "Unsigned compare bitwise Test bits nonzero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(cmtst))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
+    unsafe {
+        let c: uint64x1_t = simd_and(a, b);
+        let d: u64x1 = u64x1::new(0);
+        simd_ne(c, transmute(d))
+    }
+}
+#[doc = "Unsigned compare bitwise Test bits nonzero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
 #[inline]
 #[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(cmtst))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(fsub))]
-pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
-    unsafe { simd_sub(a, b) }
+pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    unsafe {
+        let c: uint64x2_t = simd_and(a, b);
+        let d: u64x2 = u64x2::new(0, 0);
+        simd_ne(c, transmute(d))
+    }
 }
-#[doc = "Subtract"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
+#[doc = "Compare bitwise test bits nonzero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
 #[inline]
 #[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tst))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(fsub))]
-pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
-    unsafe { simd_sub(a, b) }
+pub fn vtstd_s64(a: i64, b: i64) -> u64 {
+    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
 }
-#[doc = "Subtract"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
+#[doc = "Compare bitwise test bits nonzero"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
 #[inline]
 #[target_feature(enable = "neon")]
+#[cfg_attr(test, assert_instr(tst))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(sub))]
-pub fn vsubd_s64(a: i64, b: i64) -> i64 {
-    a.wrapping_sub(b)
+pub fn vtstd_u64(a: u64, b: u64) -> u64 {
+    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
 }
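+// Editorial note (a sketch, not emitted by the stdarch generator): every
+// vtst* lane evaluates (a & b) != 0, so CMTST sets a lane to all ones
+// exactly when the two inputs share at least one set bit. For the
+// hypothetical inputs a = 0b1100 and b = 0b0100, vtstd_u64(a, b) returns
+// u64::MAX, while b = 0b0011 gives 0. The scalar vtstd_* forms simply
+// round-trip through the 64x1 vector versions via `transmute`.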
-#[doc = "Subtract"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
+#[doc = "Signed saturating Accumulate of Unsigned value."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(sub))]
-pub fn vsubd_u64(a: u64, b: u64) -> u64 {
-    a.wrapping_sub(b)
-}
-#[doc = "Subtract"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
-#[inline]
-#[target_feature(enable = "neon,fp16")]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(fsub))]
-pub fn vsubh_f16(a: f16, b: f16) -> f16 {
-    a - b
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.suqadd.v8i8"
+        )]
+        fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
+    }
+    unsafe { _vuqadd_s8(a, b) }
 }
-#[doc = "Signed Subtract Long"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
+#[doc = "Signed saturating Accumulate of Unsigned value."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(ssubl2))]
-pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
-    unsafe {
-        let c: int16x8_t = simd_cast(vget_high_s8(a));
-        let d: int16x8_t = simd_cast(vget_high_s8(b));
-        simd_sub(c, d)
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.suqadd.v16i8"
+        )]
+        fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
     }
+    unsafe { _vuqaddq_s8(a, b) }
 }
-#[doc = "Signed Subtract Long"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
+#[doc = "Signed saturating Accumulate of Unsigned value."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(ssubl2))]
-pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
-    unsafe {
-        let c: int32x4_t = simd_cast(vget_high_s16(a));
-        let d: int32x4_t = simd_cast(vget_high_s16(b));
-        simd_sub(c, d)
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.suqadd.v4i16"
+        )]
+        fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
     }
+    unsafe { _vuqadd_s16(a, b) }
 }
-#[doc = "Signed Subtract Long"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
+#[doc = "Signed saturating Accumulate of Unsigned value."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(ssubl2))]
-pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
-    unsafe {
-        let c: int64x2_t = simd_cast(vget_high_s32(a));
-        let d: int64x2_t = simd_cast(vget_high_s32(b));
-        simd_sub(c, d)
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.suqadd.v8i16"
+        )]
+        fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
     }
+    unsafe { _vuqaddq_s16(a, b) }
 }
-#[doc = "Unsigned Subtract Long"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
+#[doc = "Signed saturating Accumulate of Unsigned value."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(usubl2))]
-pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
-    unsafe {
-        let c: uint16x8_t = simd_cast(vget_high_u8(a));
-        let d: uint16x8_t = simd_cast(vget_high_u8(b));
-        simd_sub(c, d)
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.suqadd.v2i32"
+        )]
+        fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
     }
+    unsafe { _vuqadd_s32(a, b) }
 }
-#[doc = "Unsigned Subtract Long"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
+#[doc = "Signed saturating Accumulate of Unsigned value."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(all(test, target_endian = "little"), assert_instr(usubl2))]
-pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
-    unsafe {
-        let c: uint32x4_t = simd_cast(vget_high_u16(a));
-        let d: uint32x4_t = simd_cast(vget_high_u16(b));
-        simd_sub(c, d)
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.suqadd.v4i32"
+        )]
+        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
     }
+    unsafe { _vuqaddq_s32(a, b) }
 }
"unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v1i64" + )] + fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t; } + unsafe { _vuqadd_s64(a, b) } } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"] +#[doc = "Signed saturating Accumulate of Unsigned value."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"] #[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, target_endian = "little"), assert_instr(ssubw2))] -pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { - let c = vget_high_s8(b); - unsafe { simd_sub(a, simd_cast(c)) } +#[cfg_attr(test, assert_instr(suqadd))] +pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.v2i64" + )] + fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t; + } + unsafe { _vuqaddq_s64(a, b) } } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"] +#[doc = "Signed saturating accumulate of unsigned value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, target_endian = "little"), assert_instr(ssubw2))] -pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { - let c = vget_high_s16(b); - unsafe { simd_sub(a, simd_cast(c)) } +pub fn vuqaddb_s8(a: i8, b: u8) -> i8 { + vget_lane_s8::<0>(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b))) } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"] +#[doc = "Signed saturating accumulate of unsigned value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, target_endian = "little"), assert_instr(ssubw2))] -pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { - let c = vget_high_s32(b); - unsafe { simd_sub(a, simd_cast(c)) } +pub fn vuqaddh_s16(a: i16, b: u16) -> i16 { + vget_lane_s16::<0>(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b))) } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"] +#[doc = "Signed saturating accumulate of unsigned value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, target_endian = "little"), assert_instr(usubw2))] -pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { - let c = vget_high_u8(b); - unsafe { simd_sub(a, simd_cast(c)) } +pub fn vuqaddd_s64(a: i64, b: u64) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + 
any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.i64" + )] + fn _vuqaddd_s64(a: i64, b: u64) -> i64; + } + unsafe { _vuqaddd_s64(a, b) } } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"] +#[doc = "Signed saturating accumulate of unsigned value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"] #[inline] #[target_feature(enable = "neon")] +#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, target_endian = "little"), assert_instr(usubw2))] -pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { - let c = vget_high_u16(b); - unsafe { simd_sub(a, simd_cast(c)) } +pub fn vuqadds_s32(a: i32, b: u32) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.suqadd.i32" + )] + fn _vuqadds_s32(a: i32, b: u32) -> i32; + } + unsafe { _vuqadds_s32(a, b) } } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(all(test, target_endian = "little"), assert_instr(usubw2))] -pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { - let c = vget_high_u32(b); - unsafe { simd_sub(a, simd_cast(c)) } +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp1) +)] +pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) } } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe { - { - transmute(b) - } - }) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp1) +)] +pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp1) +)] +pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { - vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp1) +)] +pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { - vqtbl1_s8(vcombine_s8(a.0, a.1), vreinterpret_u8_s8(b)) +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { - vqtbl1_u8(vcombine_u8(a.0, a.1), b) +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, 
b, [1, 0]); + let ret_val: float32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { - vqtbl1_p8(vcombine_p8(a.0, a.1), b) +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { - let x = int8x16x2_t( - vcombine_s8(a.0, a.1), - vcombine_s8(a.2, unsafe { crate::mem::zeroed() }), - ); - vqtbl2_s8(x, vreinterpret_u8_s8(b)) +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { - let x = uint8x16x2_t( - vcombine_u8(a.0, a.1), - vcombine_u8(a.2, unsafe { crate::mem::zeroed() }), - ); - vqtbl2_u8(x, b) +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } } -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { - let x = poly8x16x2_t( - vcombine_p8(a.0, a.1), - vcombine_p8(a.2, unsafe { crate::mem::zeroed() }), - ); - vqtbl2_p8(x, b) +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vuzp1_s32(a: int32x2_t, b: 
+    unsafe {
+        let a: int32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: int32x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: int32x2_t = simd_shuffle!(a, b, [0, 2]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
 }
-#[doc = "Table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
-    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
-    vqtbl2_s8(x, vreinterpret_u8_s8(b))
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(zip1)
+)]
+pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
+    unsafe { simd_shuffle!(a, b, [0, 2]) }
 }
-#[doc = "Table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
-    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
-    vqtbl2_u8(x, b)
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(zip1)
+)]
+pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
+    unsafe {
+        let a: int64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: int64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 2]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
 }
-#[doc = "Table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbl))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
-    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
-    vqtbl2_p8(x, b)
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(zip1)
+)]
+pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
+    unsafe { simd_shuffle!(a, b, [0, 2]) }
 }
-#[doc = "Extended table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbx))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
-    unsafe {
-        simd_select(
-            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
-            vqtbx1_s8(
-                a,
-                vcombine_s8(b, crate::mem::zeroed()),
-                vreinterpret_u8_s8(c),
-            ),
-            a,
-        )
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(zip1)
+)]
+pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
+    unsafe {
+        let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: uint32x2_t = simd_shuffle!(a, b, [0, 2]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
     }
 }
-#[doc = "Extended table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbx))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
-    unsafe {
-        simd_select(
-            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(8))),
-            vqtbx1_u8(a, vcombine_u8(b, crate::mem::zeroed()), c),
-            a,
-        )
-    }
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(zip1)
+)]
+pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    unsafe { simd_shuffle!(a, b, [0, 2]) }
 }
-#[doc = "Extended table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbx))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(zip1)
+)]
+pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     unsafe {
-        simd_select(
-            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(8))),
-            vqtbx1_p8(a, vcombine_p8(b, crate::mem::zeroed()), c),
-            a,
-        )
+        let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 2]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
     }
 }
-#[doc = "Extended table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbx))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
-    unsafe {
-        simd_select(
-            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(16))),
-            vqtbx1_s8(a, vcombine_s8(b.0, b.1), vreinterpret_u8_s8(c)),
-            a,
-        )
-    }
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(zip1)
+)]
+pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
+    unsafe { simd_shuffle!(a, b, [0, 2]) }
 }
-#[doc = "Extended table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbx))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(zip1)
+)]
+pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
     unsafe {
-        simd_select(
-            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(16))),
-            vqtbx1_u8(a, vcombine_u8(b.0, b.1), c),
-            a,
-        )
+        let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: poly64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 2]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
 }
-#[doc = "Extended table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbx))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
-    unsafe {
-        simd_select(
-            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(16))),
-            vqtbx1_p8(a, vcombine_p8(b.0, b.1), c),
-            a,
-        )
-    }
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(uzp1)
+)]
+pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
+    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
 }
-#[doc = "Extended table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbx))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
-    let x = int8x16x2_t(
-        vcombine_s8(b.0, b.1),
-        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
-    );
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(uzp1)
+)]
+pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     unsafe {
-        simd_select(
-            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(24))),
-            vqtbx2_s8(a, x, vreinterpret_u8_s8(c)),
-            a,
-        )
+        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
     }
 }
-#[doc = "Extended table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbx))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
-    let x = uint8x16x2_t(
-        vcombine_u8(b.0, b.1),
-        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
-    );
-    unsafe {
-        simd_select(
-            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(24))),
-            vqtbx2_u8(a, x, c),
-            a,
-        )
-    }
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(uzp1)
+)]
+pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
 }
-#[doc = "Extended table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbx))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
-    let x = poly8x16x2_t(
-        vcombine_p8(b.0, b.1),
-        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
-    );
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(uzp1)
+)]
+pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     unsafe {
-        simd_select(
-            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(24))),
-            vqtbx2_p8(a, x, c),
-            a,
-        )
+        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
     }
 }
-#[doc = "Extended table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbx))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
-    let x = int8x16x2_t(vcombine_s8(b.0, b.1), vcombine_s8(b.2, b.3));
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(uzp1)
+)]
+pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     unsafe {
-        simd_select(
-            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(32))),
-            vqtbx2_s8(a, x, vreinterpret_u8_s8(c)),
+        simd_shuffle!(
             a,
+            b,
+            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
         )
     }
 }
-#[doc = "Extended table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbx))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
-    let x = uint8x16x2_t(vcombine_u8(b.0, b.1), vcombine_u8(b.2, b.3));
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(uzp1)
+)]
+pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     unsafe {
-        simd_select(
-            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(32))),
-            vqtbx2_u8(a, x, c),
+        let a: int8x16_t =
+            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: int8x16_t =
+            simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: int8x16_t = simd_shuffle!(
             a,
+            b,
+            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
+        );
+        simd_shuffle!(
+            ret_val,
+            ret_val,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
         )
     }
 }
-#[doc = "Extended table look-up"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(tbx))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
-    let x = poly8x16x2_t(vcombine_p8(b.0, b.1), vcombine_p8(b.2, b.3));
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(uzp1)
+)]
+pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
+}
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(uzp1)
+)]
+pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     unsafe {
-        simd_select(
-            simd_lt::<uint8x8_t, uint8x8_t>(c, transmute(u8x8::splat(32))),
-            vqtbx2_p8(a, x, c),
-            a,
-        )
+        let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
     }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
-#[inline]
-#[target_feature(enable = "neon,fp16")]
-#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
-#[cfg(not(target_arch = "arm64ec"))]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
+#[inline]
+#[cfg(target_endian = "little")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
-    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
+pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
 #[inline]
-#[target_feature(enable = "neon,fp16")]
-#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
-#[cfg(not(target_arch = "arm64ec"))]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
-    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
+pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+    unsafe {
+        let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(zip1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
-    unsafe { simd_shuffle!(a, b, [0, 2]) }
+pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
+    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(zip1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
-    unsafe { simd_shuffle!(a, b, [0, 2]) }
+pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
+    unsafe {
+        let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(zip1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
-    unsafe { simd_shuffle!(a, b, [0, 2]) }
+pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
+    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(zip1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
-    unsafe { simd_shuffle!(a, b, [0, 2]) }
+pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
+    unsafe {
+        let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(zip1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
-    unsafe { simd_shuffle!(a, b, [0, 2]) }
+pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
+    unsafe {
+        simd_shuffle!(
+            a,
+            b,
+            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
+        )
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(zip1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
-    unsafe { simd_shuffle!(a, b, [0, 2]) }
+pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
+    unsafe {
+        let a: uint8x16_t =
+            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: uint8x16_t =
+            simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: uint8x16_t = simd_shuffle!(
+            a,
+            b,
+            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
+        );
+        simd_shuffle!(
+            ret_val,
+            ret_val,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        )
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(zip1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
-    unsafe { simd_shuffle!(a, b, [0, 2]) }
+pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
+    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
-    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
+pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
+    unsafe {
+        let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
-    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
+pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
+    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     unsafe {
-        simd_shuffle!(
-            a,
-            b,
-            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
-        )
+        let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
     }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
-    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
+pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
+    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
-    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
+pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
+    unsafe {
+        let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
-    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
+pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
+    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
-    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
+pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
+    unsafe {
+        let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
+pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
     unsafe {
         simd_shuffle!(
             a,
             b,
-            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
+            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
+        )
+    }
+}
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
+#[inline]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon")]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(
+    all(test, not(target_env = "msvc"), target_endian = "little"),
+    assert_instr(uzp1)
+)]
+pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
+    unsafe {
+        let a: poly8x16_t =
+            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: poly8x16_t =
+            simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: poly8x16_t = simd_shuffle!(
+            a,
+            b,
+            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
+        );
+        simd_shuffle!(
+            ret_val,
+            ret_val,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
         )
     }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(
-    all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
-)]
-pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
-    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
-}
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
-    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
+pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
+    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
-    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
+pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
+    unsafe {
+        let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: poly16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
-    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
+pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
+    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp1)
 )]
-pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
+pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
     unsafe {
-        simd_shuffle!(
-            a,
-            b,
-            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
-        )
+        let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: poly16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
     }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg(target_endian = "little")]
+#[target_feature(enable = "neon,fp16")]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp2)
 )]
-pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
-    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
+pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
+    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg(target_endian = "big")]
+#[target_feature(enable = "neon,fp16")]
+#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
+#[cfg(not(target_arch = "arm64ec"))]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn1)
+    assert_instr(uzp2)
 )]
-pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
-    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
+pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
+    unsafe {
+        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: float16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon,fp16")]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
 #[cfg(not(target_arch = "arm64ec"))]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn2)
+    assert_instr(uzp2)
 )]
-pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
-    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
+pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
+    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon,fp16")]
 #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
 #[cfg(not(target_arch = "arm64ec"))]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn2)
+    assert_instr(uzp2)
 )]
-pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
-    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
+pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
+    unsafe {
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: float16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
     assert_instr(zip2)
 )]
-pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
+pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     unsafe { simd_shuffle!(a, b, [1, 3]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
     assert_instr(zip2)
 )]
-pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
-    unsafe { simd_shuffle!(a, b, [1, 3]) }
+pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
+    unsafe {
+        let a: float32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: float32x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: float32x2_t = simd_shuffle!(a, b, [1, 3]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
     assert_instr(zip2)
 )]
-pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
+pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
     unsafe { simd_shuffle!(a, b, [1, 3]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
     assert_instr(zip2)
 )]
-pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
-    unsafe { simd_shuffle!(a, b, [1, 3]) }
+pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
+    unsafe {
+        let a: float64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: float64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: float64x2_t = simd_shuffle!(a, b, [1, 3]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
     assert_instr(zip2)
 )]
-pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
+pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     unsafe { simd_shuffle!(a, b, [1, 3]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
     assert_instr(zip2)
 )]
-pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
-    unsafe { simd_shuffle!(a, b, [1, 3]) }
+pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
+    unsafe {
+        let a: int32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: int32x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: int32x2_t = simd_shuffle!(a, b, [1, 3]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
     assert_instr(zip2)
 )]
-pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
+pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     unsafe { simd_shuffle!(a, b, [1, 3]) }
 }
-#[doc = "Transpose vectors"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
+#[doc = "Unzip vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(
     all(test, not(target_env = "msvc"), target_endian = "little"),
-    assert_instr(trn2)
+    assert_instr(zip2)
 )]
 -pub fn vtrn2q_f32(a: float32x4_t, b: 
float32x4_t) -> float32x4_t { - unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } +pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(zip2) )] -pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } +pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(zip2) )] -pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unsafe { - simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ) + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr( - all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) -)] -pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } -} -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(zip2) )] -pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } +pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(zip2) )] -pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } +pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(zip2) )] -pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } +pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(zip2) )] -pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { +pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { unsafe { - simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ) + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: poly64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: poly64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(uzp2) )] -pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } +pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"] #[inline] 
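// --- Editorial sketch, illustration only (not part of the generated patch). ---
// A minimal usage example for the unzip intrinsics regenerated in this hunk:
// `vuzp1q_f32` keeps the even-indexed lanes of the two concatenated inputs and
// `vuzp2q_f32` the odd-indexed lanes, which deinterleaves (re, im) pairs.
// The helper name `split_complex` is hypothetical; the loads, stores, and unzip
// calls are the public `core::arch::aarch64` API.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn split_complex(interleaved: &[f32; 8]) -> ([f32; 4], [f32; 4]) {
    use core::arch::aarch64::*;
    unsafe {
        let lo = vld1q_f32(interleaved.as_ptr());        // r0 i0 r1 i1
        let hi = vld1q_f32(interleaved.as_ptr().add(4)); // r2 i2 r3 i3
        let re = vuzp1q_f32(lo, hi); // even-indexed lanes: r0 r1 r2 r3
        let im = vuzp2q_f32(lo, hi); // odd-indexed lanes:  i0 i1 i2 i3
        let (mut re_out, mut im_out) = ([0.0f32; 4], [0.0f32; 4]);
        vst1q_f32(re_out.as_mut_ptr(), re);
        vst1q_f32(im_out.as_mut_ptr(), im);
        (re_out, im_out)
    }
}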
+#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(uzp2) )] -pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } +pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(uzp2) )] -pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } +pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(uzp2) )] -pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } +pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(uzp2) )] -pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { +pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unsafe { simd_shuffle!( a, b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] ) } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(uzp2) )] -pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) } +pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } -#[doc = "Transpose vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(trn2) + assert_instr(uzp2) )] -pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) } -} -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { - unsafe { - let c: int64x1_t = simd_and(a, b); - let d: i64x1 = i64x1::new(0); - simd_ne(c, transmute(d)) - } +pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unsafe { - let c: int64x2_t = simd_and(a, b); - let d: i64x2 = i64x2::new(0, 0); - simd_ne(c, transmute(d)) + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { - unsafe { - let c: poly64x1_t = simd_and(a, b); - let d: i64x1 = i64x1::new(0); - simd_ne(c, transmute(d)) - } +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unsafe { - let c: poly64x2_t = simd_and(a, b); - let d: i64x2 = i64x2::new(0, 0); - simd_ne(c, transmute(d)) + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe { - let c: uint64x1_t = simd_and(a, b); - let d: u64x1 = u64x1::new(0); - simd_ne(c, transmute(d)) - } +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unsafe { - let c: uint64x2_t = simd_and(a, b); - let d: u64x2 = u64x2::new(0, 0); - simd_ne(c, transmute(d)) + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int32x4_t = 
simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Compare bitwise test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tst))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtstd_s64(a: i64, b: i64) -> u64 { - unsafe { transmute(vtst_s64(transmute(a), transmute(b))) } -} -#[doc = "Compare bitwise test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(tst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vtstd_u64(a: u64, b: u64) -> u64 { - unsafe { transmute(vtst_u64(transmute(a), transmute(b))) } +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v8i8" - )] - fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t; +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vuqadd_s8(a, b) } } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v16i8" - )] - fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t; +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + 
simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ) } - unsafe { _vuqaddq_s8(a, b) } } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v4i16" - )] - fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t; +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } - unsafe { _vuqadd_s16(a, b) } } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v8i16" - )] - fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t; - } - unsafe { _vuqaddq_s16(a, b) } +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v2i32" - )] - fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t; +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: 
uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vuqadd_s32(a, b) } } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v4i32" - )] - fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t; - } - unsafe { _vuqaddq_s32(a, b) } +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v1i64" - )] - fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t; +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vuqadd_s64(a, b) } } -#[doc = "Signed saturating Accumulate of Unsigned value."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(suqadd))] -pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.v2i64" - )] - fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t; - } - unsafe { _vuqaddq_s64(a, b) } +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } } -#[doc = "Signed saturating accumulate of 
unsigned value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vuqaddb_s8(a: i8, b: u8) -> i8 { - vget_lane_s8::<0>(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b))) +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Signed saturating accumulate of unsigned value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vuqaddh_s16(a: i16, b: u16) -> i16 { - vget_lane_s16::<0>(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b))) +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } } -#[doc = "Signed saturating accumulate of unsigned value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vuqaddd_s64(a: i64, b: u64) -> i64 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.i64" - )] - fn _vuqaddd_s64(a: i64, b: u64) -> i64; +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vuqaddd_s64(a, b) } } -#[doc = "Signed saturating accumulate of unsigned value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vuqadds_s32(a: i32, b: u32) -> i32 { - unsafe extern "unadjusted" { - 
#[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.suqadd.i32" - )] - fn _vuqadds_s32(a: i32, b: u32) -> i32; +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { + simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ) } - unsafe { _vuqadds_s32(a, b) } } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"] #[inline] -#[target_feature(enable = "neon,fp16")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(uzp2) )] -pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) } +pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"] #[inline] -#[target_feature(enable = "neon,fp16")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(uzp2) )] -pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } +pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(uzp2) )] -pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe { simd_shuffle!(a, b, [0, 2]) } +pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: poly16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Unzip 
vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(uzp2) )] -pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe { simd_shuffle!(a, b, [0, 2]) } +pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(uzp2) +)] +pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Exclusive OR and rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"] +#[inline] +#[target_feature(enable = "neon,sha3")] +#[cfg_attr(test, assert_instr(xar, IMM6 = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] +pub fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(IMM6, 6); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.xar" + )] + fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t; + } + unsafe { _vxarq_u64(a, b, IMM6 as i64) } +} +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip1) )] -pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { simd_shuffle!(a, b, [0, 2]) } +pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), 
assert_instr(zip1) )] -pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe { simd_shuffle!(a, b, [0, 2]) } +pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip1) )] -pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_shuffle!(a, b, [0, 2]) } +pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip1) )] -pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe { simd_shuffle!(a, b, [0, 2]) } +pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip1) )] -pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { +pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unsafe { simd_shuffle!(a, b, [0, 2]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( 
all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) } +pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } +pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { unsafe { - simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ) + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) } +pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] 
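// --- Editorial sketch, illustration only (not part of the generated patch). ---
// A minimal, hypothetical usage of the `vxarq_u64` intrinsic added earlier in
// this hunk. Note its signature takes the rotate amount as a const generic
// (`vxarq_u64::<IMM6>`, IMM6 in 0..=63): XAR XORs the two inputs and then
// rotates each 64-bit lane right by IMM6. The helper name `xor_ror7` is invented.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon,sha3")]
unsafe fn xor_ror7(
    a: core::arch::aarch64::uint64x2_t,
    b: core::arch::aarch64::uint64x2_t,
) -> core::arch::aarch64::uint64x2_t {
    // Lane-wise equivalent of (a[i] ^ b[i]).rotate_right(7).
    unsafe { core::arch::aarch64::vxarq_u64::<7>(a, b) }
}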
-pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } +pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) } +pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } +pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { +pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unsafe { simd_shuffle!( a, b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] ) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1_u16(a: uint16x4_t, b: 
uint16x4_t) -> uint16x4_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) } +pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } +pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) } +pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } +pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - 
assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { +pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unsafe { - simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ) + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) + assert_instr(zip1) )] -pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) } +pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp1) -)] -pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) } -} -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"] -#[inline] -#[target_feature(enable = "neon,fp16")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr( - all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } +pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"] #[inline] -#[target_feature(enable = "neon,fp16")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } +pub fn 
vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip2) + assert_instr(zip1) )] -pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe { simd_shuffle!(a, b, [1, 3]) } +pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip2) + assert_instr(zip1) )] -pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe { simd_shuffle!(a, b, [1, 3]) } +pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip2) + assert_instr(zip1) )] -pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { simd_shuffle!(a, b, [1, 3]) } +pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip2) + assert_instr(zip1) )] -pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe { simd_shuffle!(a, b, [1, 3]) } +pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip2) + assert_instr(zip1) )] -pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_shuffle!(a, b, [1, 3]) } +pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip2) + assert_instr(zip1) )] -pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe { simd_shuffle!(a, b, [1, 3]) } +pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip2) + assert_instr(zip1) )] -pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - unsafe { simd_shuffle!(a, b, [1, 3]) } +pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } +pub fn vzip1_u16(a: 
uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } +pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe { - simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ) - } +pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } +pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } +pub fn vzip1_u32(a: 
uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } +pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } +pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { +pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unsafe { - simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ) + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } +pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { simd_shuffle!(a, b, [0, 2]) } } -#[doc = "Unzip vectors"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } +pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } +pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } +pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { +pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { unsafe { simd_shuffle!( a, b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] ) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) } +pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(uzp2) + assert_instr(zip1) )] -pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) } +pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } } -#[doc = "Exclusive OR and rotate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"] #[inline] -#[target_feature(enable = "neon,sha3")] -#[cfg_attr(test, assert_instr(xar, IMM6 = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] -pub fn vxarq_u64<const IMM6: i64>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(IMM6, 6); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.xar" - )] - fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t; +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip1) +)] +pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: poly16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vxarq_u64(a, b, IMM6 as i64) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"] #[inline] -#[target_feature(enable = "neon,fp16")]
-#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip1) )] -pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } +pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"] #[inline] -#[target_feature(enable = "neon,fp16")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip1) )] -pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } +pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip1) )] -pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { +pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { unsafe { simd_shuffle!(a, b, [0, 2]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip1) )] -pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } +pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: poly64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 2]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = 
"1.59.0")] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe { simd_shuffle!(a, b, [0, 2]) } +pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } +pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe { - simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ) - } +pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,fp16")] +#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] +#[cfg(not(target_arch = "arm64ec"))] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } +pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = 
simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } +pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { simd_shuffle!(a, b, [0, 2]) } +pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } +pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe { simd_shuffle!(a, b, [0, 2]) } +pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } +pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { +pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { unsafe { - simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ) + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } +pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } +pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable 
= "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_shuffle!(a, b, [0, 2]) } +pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { + simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } +pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe { simd_shuffle!(a, b, [0, 2]) } +pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } +pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - unsafe { - simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ) - } +pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) } +pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) } +pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), - assert_instr(zip1) + assert_instr(zip2) )] -pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { - unsafe { simd_shuffle!(a, b, [0, 2]) } +pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"] #[inline] -#[target_feature(enable = "neon,fp16")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[stable(feature = 
"neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { +pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"] #[inline] -#[target_feature(enable = "neon,fp16")] -#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] -#[cfg(not(target_arch = "arm64ec"))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } +pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { +pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unsafe { simd_shuffle!(a, b, [1, 3]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } +pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { - unsafe { simd_shuffle!(a, b, [1, 3]) } +pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { 
simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } +pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { unsafe { simd_shuffle!( a, @@ -24961,158 +31171,205 @@ pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } +pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } +pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } } #[doc = "Zip 
vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { simd_shuffle!(a, b, [1, 3]) } +pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } +pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe { simd_shuffle!(a, b, [1, 3]) } +pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } +pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_shuffle!(a, b, [1, 3]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"] #[inline] +#[cfg(target_endian = "big")] 
#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { +pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unsafe { - simd_shuffle!( - a, - b, - [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] - ) + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { +pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } +pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { +pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { unsafe { simd_shuffle!(a, b, [1, 3]) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } +pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { + let a: uint64x2_t = 
simd_shuffle!(a, a, [1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( all(test, not(target_env = "msvc"), target_endian = "little"), assert_instr(zip2) )] -pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe { simd_shuffle!(a, b, [1, 3]) } +pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( @@ -25120,11 +31377,17 @@ pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { assert_instr(zip2) )] pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( @@ -25141,8 +31404,37 @@ pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } } #[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( @@ -25153,8 +31445,27 @@ pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) } } #[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"] +#[inline] +#[cfg(target_endian = 
"big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: poly16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( @@ -25165,8 +31476,27 @@ pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) } } #[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr( @@ -25176,3 +31506,21 @@ pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { unsafe { simd_shuffle!(a, b, [1, 3]) } } +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + all(test, not(target_env = "msvc"), target_endian = "little"), + assert_instr(zip2) +)] +pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: poly64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: poly64x2_t = simd_shuffle!(a, b, [1, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs index c5bd5c8917..5284a3c44a 100644 --- a/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/crates/core_arch/src/arm_shared/neon/generated.rs @@ -3154,6 +3154,7 @@ pub fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { #[doc = "AES single round encryption."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(aesd))] @@ -3177,8 +3178,52 @@ pub fn vaesdq_u8(data: 
uint8x16_t, key: uint8x16_t) -> uint8x16_t { unsafe { _vaesdq_u8(data, key) } } #[doc = "AES single round encryption."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesd))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesd" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesd")] + fn _vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t; + } + unsafe { + let data: uint8x16_t = simd_shuffle!( + data, + data, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let key: uint8x16_t = simd_shuffle!( + key, + key, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let ret_val: uint8x16_t = _vaesdq_u8(data, key); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "AES single round encryption."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(aese))] @@ -3201,9 +3246,53 @@ pub fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { } unsafe { _vaeseq_u8(data, key) } } +#[doc = "AES single round encryption."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aese))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aese" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aese")] + fn _vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t; + } + unsafe { + let data: uint8x16_t = simd_shuffle!( + data, + data, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let key: uint8x16_t = simd_shuffle!( + key, + key, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let ret_val: uint8x16_t = _vaeseq_u8(data, key); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} #[doc = "AES inverse mix columns."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(aesimc))] @@ -3226,9 +3315,48 @@ pub fn 
vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { } unsafe { _vaesimcq_u8(data) } } +#[doc = "AES inverse mix columns."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesimc))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesimc" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesimc")] + fn _vaesimcq_u8(data: uint8x16_t) -> uint8x16_t; + } + unsafe { + let data: uint8x16_t = simd_shuffle!( + data, + data, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let ret_val: uint8x16_t = _vaesimcq_u8(data); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} #[doc = "AES mix columns."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(aesmc))] @@ -3251,6 +3379,44 @@ pub fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { } unsafe { _vaesmcq_u8(data) } } +#[doc = "AES mix columns."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(aesmc))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.aesmc" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.aesmc")] + fn _vaesmcq_u8(data: uint8x16_t) -> uint8x16_t; + } + unsafe { + let data: uint8x16_t = simd_shuffle!( + data, + data, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + let ret_val: uint8x16_t = _vaesmcq_u8(data); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] #[inline] @@ -7594,6 +7760,7 @@ pub fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] @@ -7611,8 +7778,34 @@ pub fn vcombine_f16(a: float16x4_t, b: float16x4_t) -> float16x8_t { unsafe { 
simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } } #[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "stdarch_neon_fp16", since = "1.94.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(nop))] +pub fn vcombine_f16(a: float16x4_t, b: float16x4_t) -> float16x8_t { + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7628,8 +7821,32 @@ pub fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { unsafe { simd_shuffle!(a, b, [0, 1, 2, 3]) } } #[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float32x4_t = simd_shuffle!(a, b, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7645,8 +7862,37 @@ pub fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } } #[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + 
target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7662,8 +7908,32 @@ pub fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } } #[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7679,8 +7949,32 @@ pub fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { unsafe { simd_shuffle!(a, b, [0, 1, 2, 3]) } } #[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int32x4_t = simd_shuffle!(a, b, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7696,8 +7990,30 @@ pub fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { unsafe { simd_shuffle!(a, b, [0, 1]) } } #[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { + unsafe { + let ret_val: int64x2_t = simd_shuffle!(a, b, [0, 1]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7713,8 +8029,37 @@ pub fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } } #[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7730,8 +8075,32 @@ pub fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } } #[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u16)"] +#[inline] +#[cfg(target_endian = "big")] 
+#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7747,8 +8116,32 @@ pub fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { unsafe { simd_shuffle!(a, b, [0, 1, 2, 3]) } } #[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u32)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { + unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x4_t = simd_shuffle!(a, b, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7764,8 +8157,30 @@ pub fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { unsafe { simd_shuffle!(a, b, [0, 1]) } } #[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { + unsafe { + let ret_val: uint64x2_t = simd_shuffle!(a, b, [0, 1]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7781,8 +8196,37 @@ pub fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) } } #[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p8)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = + simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } +} +#[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7798,8 +8242,32 @@ pub fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { unsafe { simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]) } } #[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: poly16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: poly16x8_t = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7814,10 +8282,30 @@ pub fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { pub fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { unsafe { simd_shuffle!(a, b, [0, 1]) } } 
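Every `#[cfg(target_endian = "big")]` variant added in this patch follows the same mechanical shape: reverse each multi-lane input into the generator's canonical little-endian lane numbering, apply exactly the same `simd_shuffle!` index list that the little-endian path uses, then reverse the full-width result back. A minimal sketch of that invariant on plain arrays (illustration only, not stdarch code; `rev`, `zip2_le`, and `zip2_be` are made-up names, with `zip2_le` mirroring the `[2, 6, 3, 7]` shuffle of `vzip2q_u32`):

```rust
// Models the lane reversal the generated big-endian code performs with
// simd_shuffle!(v, v, [3, 2, 1, 0]) and friends.
fn rev<const N: usize>(mut v: [u32; N]) -> [u32; N] {
    v.reverse();
    v
}

// Canonical little-endian-lane-order zip2 on 4-lane vectors: interleave the
// upper halves, i.e. simd_shuffle!(a, b, [2, 6, 3, 7]).
fn zip2_le(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
    [a[2], b[2], a[3], b[3]]
}

// The wrapper emitted for big-endian targets: reverse inputs into canonical
// lane order, run the canonical shuffle, reverse the result back.
fn zip2_be(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
    rev(zip2_le(rev(a), rev(b)))
}

fn main() {
    // Treat these as register contents on a big-endian target, where
    // register lane i corresponds to memory lane N - 1 - i.
    let (a_reg, b_reg) = ([0u32, 1, 2, 3], [4u32, 5, 6, 7]);
    let (a_mem, b_mem) = (rev(a_reg), rev(b_reg));
    // The memory view of the wrapped result equals the canonical zip2 of the
    // memory views, so both byte orders observe identical semantics.
    assert_eq!(rev(zip2_be(a_reg, b_reg)), zip2_le(a_mem, b_mem));
}
```

The double reversal is what lets a single set of generated shuffle indices serve both byte orders; since the reversals are compile-time permutations, LLVM can usually fold them into adjacent shuffles, so the extra cost on big-endian targets is typically no more than a lane-reverse per operand.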
+#[doc = "Join two smaller vectors into a single larger vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p64)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { + unsafe { + let ret_val: poly64x2_t = simd_shuffle!(a, b, [0, 1]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7839,36 +8327,8 @@ pub fn vcreate_f16(a: u64) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vcreate_f16(a: u64) -> float16x4_t { - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7888,34 +8348,8 @@ pub fn vcreate_f32(a: u64) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vcreate_f32(a: u64) -> float32x2_t { - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = 
"Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7935,34 +8369,8 @@ pub fn vcreate_s8(a: u64) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vcreate_s8(a: u64) -> int8x8_t { - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7982,34 +8390,8 @@ pub fn vcreate_s16(a: u64) -> int16x4_t { unsafe { transmute(a) } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vcreate_s16(a: u64) -> int16x4_t { - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -8029,31 +8411,6 @@ pub fn vcreate_s32(a: u64) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", 
target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vcreate_s32(a: u64) -> int32x2_t { - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s64)"] #[inline] #[target_feature(enable = "neon")] @@ -8077,7 +8434,6 @@ pub fn vcreate_s64(a: u64) -> int64x1_t { #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -8097,34 +8453,8 @@ pub fn vcreate_u8(a: u64) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vcreate_u8(a: u64) -> uint8x8_t { - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -8144,34 +8474,8 @@ pub fn vcreate_u16(a: u64) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vcreate_u16(a: u64) -> uint16x4_t { - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] #[inline] -#[cfg(target_endian = 
"little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -8191,31 +8495,6 @@ pub fn vcreate_u32(a: u64) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vcreate_u32(a: u64) -> uint32x2_t { - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u64)"] #[inline] #[target_feature(enable = "neon")] @@ -8239,7 +8518,6 @@ pub fn vcreate_u64(a: u64) -> uint64x1_t { #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -8259,34 +8537,8 @@ pub fn vcreate_p8(a: u64) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vcreate_p8(a: u64) -> poly8x8_t { - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -8306,31 +8558,6 @@ pub fn vcreate_p16(a: u64) -> poly16x4_t { unsafe { transmute(a) } } #[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vcreate_p16(a: u64) -> poly16x4_t { - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p64)"] #[inline] #[target_feature(enable = "neon,aes")] @@ -9532,6 +9759,7 @@ pub fn vdotq_laneq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x1 #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] #[inline] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] @@ -9559,8 +9787,45 @@ pub fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { unsafe { _vdot_s32(a, b, c) } } #[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sdot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v2i32.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sdot.v2i32.v8i8" + )] + fn _vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t; + } + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x2_t = _vdot_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] #[inline] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] @@ -9588,8 +9853,47 @@ pub fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { unsafe { _vdotq_s32(a, b, c) } } #[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] 
+#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sdot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sdot.v4i32.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sdot.v4i32.v16i8" + )] + fn _vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; + } + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = _vdotq_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] #[inline] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] @@ -9617,8 +9921,45 @@ pub fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { unsafe { _vdot_u32(a, b, c) } } #[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(udot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v2i32.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.udot.v2i32.v8i8" + )] + fn _vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t; + } + unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x2_t = _vdot_u32(a, b, c); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] #[inline] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] @@ -9645,9 +9986,48 @@ pub fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { } unsafe { _vdotq_u32(a, b, c) } } +#[doc = "Dot product arithmetic (vector)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,dotprod")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(udot) )] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_dotprod", issue = "117224") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] +pub fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.udot.v4i32.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.udot.v4i32.v16i8" + )] + fn _vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t; + } + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint32x4_t = _vdotq_u32(a, b, c); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] @@ -9671,8 +10051,9 @@ pub fn vdup_lane_f16<const N: i32>(a: float16x4_t) -> float16x4_t { unsafe { simd_shuffle!(a, a, [N as u32; 4]) } } #[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] @@ -9691,59 +10072,74 @@ pub fn vdup_lane_f16<const N: i32>(a: float16x4_t) -> float16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vdupq_lane_f16<const N: i32>(a: float16x4_t) -> float16x8_t { +pub fn vdup_lane_f16<const N: i32>(a: float16x4_t) -> float16x4_t { static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, a, [N as u32; 8]) } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float16x4_t = simd_shuffle!(a, a, [N as u32; 4]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } #[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(dup, N = 2) )] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vdupq_lane_f16(a: float16x4_t) -> float16x8_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32; 8]) } } #[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(dup, N = 2) )] #[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vdupq_lane_f16(a: float16x4_t) -> float16x8_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float16x8_t = simd_shuffle!(a, a, [N as u32; 8]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] @@ -9760,13 +10156,14 @@ pub fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { +pub fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { static_assert_uimm_bits!(N, 1); unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } #[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] #[inline] 
+#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] @@ -9783,13 +10180,18 @@ pub fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { +pub fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] @@ -9806,13 +10208,14 @@ pub fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { +pub fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } #[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] @@ -9829,19 +10232,24 @@ pub fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { +pub fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } #[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 2) + assert_instr(dup, N = 1) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -9852,19 
@@ -9852,19 +10260,20 @@ pub fn vdupq_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x4_t {
-    static_assert_uimm_bits!(N, 2);
-    unsafe { simd_shuffle!(a, a, [N as u32; 4]) }
+pub fn vdup_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 2)
+    assert_instr(dup, N = 1)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -9875,19 +10284,24 @@ pub fn vdup_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_lane_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
-    static_assert_uimm_bits!(N, 2);
-    unsafe { simd_shuffle!(a, a, [N as u32; 4]) }
+pub fn vdup_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe {
+        let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let ret_val: uint32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 2)
+    assert_instr(dup, N = 1)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -9898,19 +10312,20 @@ pub fn vdup_lane_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
-    static_assert_uimm_bits!(N, 2);
-    unsafe { simd_shuffle!(a, a, [N as u32; 4]) }
+pub fn vdupq_lane_f32<const N: i32>(a: float32x2_t) -> float32x4_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 2)
+    assert_instr(dup, N = 1)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -9921,19 +10336,24 @@ pub fn vdup_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x8_t {
-    static_assert_uimm_bits!(N, 2);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+pub fn vdupq_lane_f32<const N: i32>(a: float32x2_t) -> float32x4_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe {
+        let a: float32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let ret_val: float32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 2)
+    assert_instr(dup, N = 1)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -9944,19 +10364,20 @@ pub fn vdupq_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_lane_s16<const N: i32>(a: int16x4_t) -> int16x8_t {
-    static_assert_uimm_bits!(N, 2);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+pub fn vdupq_lane_s32<const N: i32>(a: int32x2_t) -> int32x4_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 2)
+    assert_instr(dup, N = 1)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -9967,19 +10388,24 @@ pub fn vdupq_lane_s16<const N: i32>(a: int16x4_t) -> int16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x8_t {
-    static_assert_uimm_bits!(N, 2);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+pub fn vdupq_lane_s32<const N: i32>(a: int32x2_t) -> int32x4_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe {
+        let a: int32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let ret_val: int32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 4)
+    assert_instr(dup, N = 1)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -9990,19 +10416,20 @@ pub fn vdupq_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x8_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+pub fn vdupq_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x4_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 4)
+    assert_instr(dup, N = 1)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10013,19 +10440,24 @@ pub fn vdup_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_lane_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+pub fn vdupq_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x4_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe {
+        let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let ret_val: uint32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 4)
+    assert_instr(dup, N = 2)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10036,19 +10468,20 @@ pub fn vdup_lane_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+pub fn vdup_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x4_t {
+    static_assert_uimm_bits!(N, 2);
+    unsafe { simd_shuffle!(a, a, [N as u32; 4]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 4)
+    assert_instr(dup, N = 2)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10059,19 +10492,24 @@ pub fn vdup_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x16_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 16]) }
+pub fn vdup_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x4_t {
+    static_assert_uimm_bits!(N, 2);
+    unsafe {
+        let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let ret_val: poly16x4_t = simd_shuffle!(a, a, [N as u32; 4]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 4)
+    assert_instr(dup, N = 2)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10082,19 +10520,20 @@ pub fn vdupq_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_lane_s8<const N: i32>(a: int8x8_t) -> int8x16_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 16]) }
+pub fn vdup_lane_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
+    static_assert_uimm_bits!(N, 2);
+    unsafe { simd_shuffle!(a, a, [N as u32; 4]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 4)
+    assert_instr(dup, N = 2)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10105,19 +10544,24 @@ pub fn vdupq_lane_s8<const N: i32>(a: int8x8_t) -> int8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x16_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 16]) }
+pub fn vdup_lane_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
+    static_assert_uimm_bits!(N, 2);
+    unsafe {
+        let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let ret_val: int16x4_t = simd_shuffle!(a, a, [N as u32; 4]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop, N = 0)
+    assert_instr(dup, N = 2)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10128,19 +10572,20 @@ pub fn vdupq_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_lane_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
-    static_assert!(N == 0);
-    a
+pub fn vdup_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
+    static_assert_uimm_bits!(N, 2);
+    unsafe { simd_shuffle!(a, a, [N as u32; 4]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop, N = 0)
+    assert_instr(dup, N = 2)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10151,66 +10596,73 @@ pub fn vdup_lane_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
-    static_assert!(N == 0);
-    a
+pub fn vdup_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
+    static_assert_uimm_bits!(N, 2);
+    unsafe {
+        let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let ret_val: uint16x4_t = simd_shuffle!(a, a, [N as u32; 4]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 4)
+    assert_instr(dup, N = 2)
 )]
 #[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vdup_laneq_f16<const N: i32>(a: float16x8_t) -> float16x4_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 4]) }
+pub fn vdupq_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x8_t {
+    static_assert_uimm_bits!(N, 2);
+    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 4)
+    assert_instr(dup, N = 2)
 )]
 #[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vdupq_laneq_f16<const N: i32>(a: float16x8_t) -> float16x8_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+pub fn vdupq_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x8_t {
+    static_assert_uimm_bits!(N, 2);
+    unsafe {
+        let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let ret_val: poly16x8_t = simd_shuffle!(a, a, [N as u32; 8]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
     assert_instr(dup, N = 2)
@@ -10224,16 +10676,17 @@ pub fn vdupq_laneq_f16<const N: i32>(a: float16x8_t) -> float16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_laneq_f32<const N: i32>(a: float32x4_t) -> float32x2_t {
+pub fn vdupq_lane_s16<const N: i32>(a: int16x4_t) -> int16x8_t {
     static_assert_uimm_bits!(N, 2);
-    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
+    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
     assert_instr(dup, N = 2)
@@ -10247,16 +10700,21 @@ pub fn vdup_laneq_f32<const N: i32>(a: float32x4_t) -> float32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_laneq_s32<const N: i32>(a: int32x4_t) -> int32x2_t {
+pub fn vdupq_lane_s16<const N: i32>(a: int16x4_t) -> int16x8_t {
     static_assert_uimm_bits!(N, 2);
-    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
+    unsafe {
+        let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let ret_val: int16x8_t = simd_shuffle!(a, a, [N as u32; 8]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
     assert_instr(dup, N = 2)
@@ -10270,16 +10728,17 @@ pub fn vdup_laneq_s32<const N: i32>(a: int32x4_t) -> int32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x2_t {
+pub fn vdupq_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x8_t {
     static_assert_uimm_bits!(N, 2);
-    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
+    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
     assert_instr(dup, N = 2)
@@ -10293,19 +10752,24 @@ pub fn vdup_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_laneq_f32<const N: i32>(a: float32x4_t) -> float32x4_t {
+pub fn vdupq_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x8_t {
     static_assert_uimm_bits!(N, 2);
-    unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) }
+    unsafe {
+        let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let ret_val: uint16x8_t = simd_shuffle!(a, a, [N as u32; 8]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 2)
+    assert_instr(dup, N = 4)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10316,19 +10780,20 @@ pub fn vdupq_laneq_f32<const N: i32>(a: float32x4_t) -> float32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_laneq_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
-    static_assert_uimm_bits!(N, 2);
-    unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) }
+pub fn vdup_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x8_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 2)
+    assert_instr(dup, N = 4)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10339,16 +10804,21 @@ pub fn vdupq_laneq_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
-    static_assert_uimm_bits!(N, 2);
-    unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) }
+pub fn vdup_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x8_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe {
+        let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: poly8x8_t = simd_shuffle!(a, a, [N as u32; 8]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
     assert_instr(dup, N = 4)
@@ -10362,16 +10832,17 @@ pub fn vdupq_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x4_t {
+pub fn vdup_lane_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
     static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 4]) }
+    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
     assert_instr(dup, N = 4)
@@ -10385,16 +10856,21 @@ pub fn vdup_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_laneq_s16<const N: i32>(a: int16x8_t) -> int16x4_t {
+pub fn vdup_lane_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
     static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 4]) }
-}
-#[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"]
+    unsafe {
+        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: int8x8_t = simd_shuffle!(a, a, [N as u32; 8]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
+}
+#[doc = "Set all vector lanes to the same value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
     assert_instr(dup, N = 4)
@@ -10408,16 +10884,17 @@ pub fn vdup_laneq_s16<const N: i32>(a: int16x8_t) -> int16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x4_t {
+pub fn vdup_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
     static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 4]) }
+    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
     assert_instr(dup, N = 4)
@@ -10431,16 +10908,21 @@ pub fn vdup_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x8_t {
+pub fn vdup_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
     static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+    unsafe {
+        let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: uint8x8_t = simd_shuffle!(a, a, [N as u32; 8]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
     assert_instr(dup, N = 4)
@@ -10454,16 +10936,17 @@ pub fn vdupq_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_laneq_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
+pub fn vdupq_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x16_t {
     static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+    unsafe { simd_shuffle!(a, a, [N as u32; 16]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
     assert_instr(dup, N = 4)
@@ -10477,19 +10960,28 @@ pub fn vdupq_laneq_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
+pub fn vdupq_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x16_t {
     static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+    unsafe {
+        let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: poly8x16_t = simd_shuffle!(a, a, [N as u32; 16]);
+        simd_shuffle!(
+            ret_val,
+            ret_val,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        )
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 8)
+    assert_instr(dup, N = 4)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10500,19 +10992,20 @@ pub fn vdupq_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x8_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+pub fn vdupq_lane_s8<const N: i32>(a: int8x8_t) -> int8x16_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe { simd_shuffle!(a, a, [N as u32; 16]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 8)
+    assert_instr(dup, N = 4)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10523,19 +11016,28 @@ pub fn vdup_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_laneq_s8<const N: i32>(a: int8x16_t) -> int8x8_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+pub fn vdupq_lane_s8<const N: i32>(a: int8x8_t) -> int8x16_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe {
+        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: int8x16_t = simd_shuffle!(a, a, [N as u32; 16]);
+        simd_shuffle!(
+            ret_val,
+            ret_val,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        )
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 8)
+    assert_instr(dup, N = 4)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10546,19 +11048,20 @@ pub fn vdup_laneq_s8<const N: i32>(a: int8x16_t) -> int8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x8_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
+pub fn vdupq_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x16_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe { simd_shuffle!(a, a, [N as u32; 16]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 8)
+    assert_instr(dup, N = 4)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
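
On every intrinsic in this family the lane index is a const generic, validated at compile time by static_assert_uimm_bits! (3 bits for an 8-lane source, 4 bits for a 16-lane source), and #[rustc_legacy_const_generics(1)] keeps the older positional-argument call form working. A usage sketch, assuming a little-endian aarch64 target where NEON is unconditionally available (values are illustrative, not taken from this patch's tests):

#[cfg(target_arch = "aarch64")]
fn demo_dupq_lane() {
    use core::arch::aarch64::*;
    let src = [1u8, 2, 3, 4, 5, 6, 7, 8];
    let mut out = [0u8; 16];
    unsafe {
        let a = vld1_u8(src.as_ptr()); // load a 64-bit (8-lane) vector
        let b = vdupq_lane_u8::<3>(a); // duplicate lane 3 into all 16 lanes
        vst1q_u8(out.as_mut_ptr(), b);
    }
    assert_eq!(out, [4u8; 16]); // N = 3 selects the value 4
}
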
@@ -10569,19 +11072,27 @@ pub fn vdup_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x16_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe { simd_shuffle!(a, a, [N as u32; 16]) }
+pub fn vdupq_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x16_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe {
+        let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: uint8x16_t = simd_shuffle!(a, a, [N as u32; 16]);
+        simd_shuffle!(
+            ret_val,
+            ret_val,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        )
+    }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s64)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 8)
+    assert_instr(nop, N = 0)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10592,19 +11103,19 @@ pub fn vdupq_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_laneq_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe { simd_shuffle!(a, a, [N as u32; 16]) }
+pub fn vdup_lane_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
+    static_assert!(N == 0);
+    a
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u64)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup, N = 8)
+    assert_instr(nop, N = 0)
 )]
 #[rustc_legacy_const_generics(1)]
 #[cfg_attr(
@@ -10615,119 +11126,134 @@ pub fn vdupq_laneq_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdupq_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe { simd_shuffle!(a, a, [N as u32; 16]) }
+pub fn vdup_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
+    static_assert!(N == 0);
+    a
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop, N = 1)
+    assert_instr(dup, N = 4)
 )]
 #[rustc_legacy_const_generics(1)]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_laneq_s64<const N: i32>(a: int64x2_t) -> int64x1_t {
-    static_assert_uimm_bits!(N, 1);
-    unsafe { transmute(vgetq_lane_s64::<N>(a)) }
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vdup_laneq_f16<const N: i32>(a: float16x8_t) -> float16x4_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe { simd_shuffle!(a, a, [N as u32; 4]) }
 }
 #[doc = "Set all vector lanes to the same value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop, N = 1)
+    assert_instr(dup, N = 4)
 )]
 #[rustc_legacy_const_generics(1)]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x1_t {
-    static_assert_uimm_bits!(N, 1);
-    unsafe { transmute(vgetq_lane_u64::<N>(a)) }
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vdup_laneq_f16<const N: i32>(a: float16x8_t) -> float16x4_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe {
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: float16x4_t = simd_shuffle!(a, a, [N as u32; 4]);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
-#[doc = "Create a new vector with all lanes set to a value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f16)"]
+#[doc = "Set all vector lanes to the same value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup)
+    assert_instr(dup, N = 4)
 )]
+#[rustc_legacy_const_generics(1)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vdup_n_f16(a: f16) -> float16x4_t {
-    float16x4_t::splat(a)
-}
-#[doc = "Create a new vector with all lanes set to a value"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f16)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
 #[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup)
+    not(target_arch = "arm"),
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub fn vdupq_n_f16(a: f16) -> float16x8_t {
-    float16x8_t::splat(a)
+pub fn vdupq_laneq_f16<const N: i32>(a: float16x8_t) -> float16x8_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe { simd_shuffle!(a, a, [N as u32; 8]) }
 }
-#[doc = "Duplicate vector element to vector or scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f32)"]
+#[doc = "Set all vector lanes to the same value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup)
+    assert_instr(dup, N = 4)
 )]
+#[rustc_legacy_const_generics(1)]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_n_f32(value: f32) -> float32x2_t {
-    float32x2_t::splat(value)
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vdupq_laneq_f16<const N: i32>(a: float16x8_t) -> float16x8_t {
+    static_assert_uimm_bits!(N, 3);
+    unsafe {
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: float16x8_t = simd_shuffle!(a, a, [N as u32; 8]);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
-#[doc = "Duplicate vector element to vector or scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_p16)"]
+#[doc = "Set all vector lanes to the same value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup)
+    assert_instr(dup, N = 2)
 )]
+#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -10736,19 +11262,22 @@ pub fn vdup_n_f32(value: f32) -> float32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_n_p16(value: p16) -> poly16x4_t {
-    poly16x4_t::splat(value)
+pub fn vdup_laneq_f32<const N: i32>(a: float32x4_t) -> float32x2_t {
+    static_assert_uimm_bits!(N, 2);
+    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
 }
-#[doc = "Duplicate vector element to vector or scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_p8)"]
+#[doc = "Set all vector lanes to the same value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup)
+    assert_instr(dup, N = 2)
 )]
+#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -10757,19 +11286,26 @@ pub fn vdup_n_p16(value: p16) -> poly16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_n_p8(value: p8) -> poly8x8_t {
-    poly8x8_t::splat(value)
+pub fn vdup_laneq_f32<const N: i32>(a: float32x4_t) -> float32x2_t {
+    static_assert_uimm_bits!(N, 2);
+    unsafe {
+        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let ret_val: float32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
 }
-#[doc = "Duplicate vector element to vector or scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s16)"]
+#[doc = "Set all vector lanes to the same value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup)
+    assert_instr(dup, N = 2)
 )]
+#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -10778,19 +11314,22 @@ pub fn vdup_n_p8(value: p8) -> poly8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vdup_n_s16(value: i16) -> int16x4_t {
-    int16x4_t::splat(value)
+pub fn vdup_laneq_s32<const N: i32>(a: int32x4_t) -> int32x2_t {
+    static_assert_uimm_bits!(N, 2);
+    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
 }
-#[doc = "Duplicate vector element to vector or scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s32)"]
+#[doc = "Set all vector lanes to the same value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(dup)
+    assert_instr(dup, N = 2)
 )]
+#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
+11338,26 @@ pub fn vdup_n_s16(value: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_n_s32(value: i32) -> int32x2_t { - int32x2_t::splat(value) +pub fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmov) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10820,19 +11366,22 @@ pub fn vdup_n_s32(value: i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_n_s64(value: i64) -> int64x1_t { - int64x1_t::splat(value) +pub fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10841,19 +11390,26 @@ pub fn vdup_n_s64(value: i64) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_n_s8(value: i8) -> int8x8_t { - int8x8_t::splat(value) +pub fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10862,19 +11418,22 @@ pub fn vdup_n_s8(value: i8) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_n_u16(value: u16) -> uint16x4_t { - uint16x4_t::splat(value) +pub fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10883,19 +11442,26 @@ pub fn vdup_n_u16(value: u16) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_n_u32(value: u32) -> uint32x2_t { - uint32x2_t::splat(value) +pub fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmov) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10904,19 +11470,22 @@ pub fn vdup_n_u32(value: u32) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_n_u64(value: u64) -> uint64x1_t { - uint64x1_t::splat(value) +pub fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, 
N as u32, N as u32, N as u32]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10925,19 +11494,26 @@ pub fn vdup_n_u64(value: u64) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdup_n_u8(value: u8) -> uint8x8_t { - uint8x8_t::splat(value) +pub fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10946,19 +11522,22 @@ pub fn vdup_n_u8(value: u8) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_n_f32(value: f32) -> float32x4_t { - float32x4_t::splat(value) +pub fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_p16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( 
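Note: the vdupq_laneq_* intrinsics added above broadcast one lane of the source vector to every lane of the result, with the lane index supplied as a const generic. A minimal illustration of the little-endian path, assuming an AArch64 target with NEON available (the helper name demo_dup_lane is ours):

    // Broadcast lane 2 of a float32x4_t to all four lanes.
    #[cfg(target_arch = "aarch64")]
    #[target_feature(enable = "neon")]
    fn demo_dup_lane() {
        use core::arch::aarch64::*;
        let src = [1.0f32, 2.0, 3.0, 4.0];
        // Loading through a raw pointer is unsafe.
        let v = unsafe { vld1q_f32(src.as_ptr()) };
        let d = vdupq_laneq_f32::<2>(v);
        assert_eq!(vgetq_lane_f32::<0>(d), 3.0); // every lane is now 3.0
        assert_eq!(vgetq_lane_f32::<3>(d), 3.0);
    }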
not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10967,19 +11546,26 @@ pub fn vdupq_n_f32(value: f32) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_n_p16(value: p16) -> poly16x8_t { - poly16x8_t::splat(value) +pub fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x4_t = simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_p8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -10988,19 +11574,22 @@ pub fn vdupq_n_p16(value: p16) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_n_p8(value: p8) -> poly8x16_t { - poly8x16_t::splat(value) +pub fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shuffle!(a, a, [N as u32; 4]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11009,19 +11598,26 @@ pub fn vdupq_n_p8(value: p8) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_n_s16(value: i16) -> int16x8_t { - int16x8_t::splat(value) +pub fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x4_t = simd_shuffle!(a, a, [N as u32; 4]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11030,19 +11626,22 @@ pub fn vdupq_n_s16(value: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_n_s32(value: i32) -> int32x4_t { - int32x4_t::splat(value) +pub fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shuffle!(a, a, [N as u32; 4]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11051,19 +11650,26 @@ pub fn vdupq_n_s32(value: i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_n_s64(value: i64) -> int64x2_t { - int64x2_t::splat(value) +pub fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x4_t = simd_shuffle!(a, a, [N as u32; 4]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11072,19 +11678,22 @@ pub fn vdupq_n_s64(value: i64) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_n_s8(value: i8) -> int8x16_t { - int8x16_t::splat(value) +pub fn 
vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shuffle!(a, a, [N as u32; 4]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11093,19 +11702,26 @@ pub fn vdupq_n_s8(value: i8) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_n_u16(value: u16) -> uint16x8_t { - uint16x8_t::splat(value) +pub fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x4_t = simd_shuffle!(a, a, [N as u32; 4]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11114,19 +11730,22 @@ pub fn vdupq_n_u16(value: u16) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_n_u32(value: u32) -> uint32x4_t { - uint32x4_t::splat(value) +pub fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shuffle!(a, a, [N as u32; 8]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 4) 
)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11135,19 +11754,26 @@ pub fn vdupq_n_u32(value: u32) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_n_u64(value: u64) -> uint64x2_t { - uint64x2_t::splat(value) +pub fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = simd_shuffle!(a, a, [N as u32; 8]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11156,19 +11782,22 @@ pub fn vdupq_n_u64(value: u64) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_n_u8(value: u8) -> uint8x16_t { - uint8x16_t::splat(value) +pub fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shuffle!(a, a, [N as u32; 8]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f32_vfp4)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11177,19 +11806,26 @@ pub fn vdupq_n_u8(value: u8) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -fn vdup_n_f32_vfp4(value: f32) -> float32x2_t { - float32x2_t::splat(value) +pub fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = simd_shuffle!(a, a, [N as u32; 8]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's 
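Note on the #[cfg(target_endian = "big")] bodies above: on big-endian targets the in-register lane order is the reverse of the little-endian convention the shuffle indices assume, so the generated code reverses the lanes, applies the same index shuffle, and reverses the result back. A plain-array sketch of why that composition selects the correct logical lane (names are ours, not stdarch's):

    // rev4 converts between register lane order and logical lane order.
    fn rev4(x: [i16; 4]) -> [i16; 4] {
        [x[3], x[2], x[1], x[0]]
    }
    // Little-endian path: indices address logical lanes directly.
    fn dup_lane<const N: usize>(v: [i16; 4]) -> [i16; 4] {
        [v[N]; 4]
    }
    // Big-endian path: normalise to logical order, duplicate, restore.
    fn dup_lane_be<const N: usize>(v_reg: [i16; 4]) -> [i16; 4] {
        rev4(dup_lane::<N>(rev4(v_reg)))
    }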
-#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f32_vfp4)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(dup, N = 4) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11198,18 +11834,20 @@ fn vdup_n_f32_vfp4(value: f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t { - float32x4_t::splat(value) +pub fn vdupq_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shuffle!(a, a, [N as u32; 8]) } } #[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 0) + assert_instr(dup, N = 4) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -11220,19 +11858,24 @@ fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_s64<const N: i32>(a: int64x1_t) -> int64x2_t { - static_assert!(N == 0); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +pub fn vdupq_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = simd_shuffle!(a, a, [N as u32; 8]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 0) + assert_instr(dup, N = 8) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -11243,19 +11886,20 @@ pub fn vdupq_lane_s64<const N: i32>(a: int64x1_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x2_t { - static_assert!(N == 0); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +pub fn vdup_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { simd_shuffle!(a, a, [N as u32; 8]) } } #[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(dup, N = 8) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -11266,19 +11910,25 @@ pub fn vdupq_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_s64<const N: i32>(a: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +pub fn vdup_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = simd_shuffle!(a, a, [N as u32; 8]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } #[doc = "Set all vector lanes to the same value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup, N = 1) + assert_instr(dup, N = 8) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -11289,20 +11939,22 @@ pub fn vdupq_laneq_s64<const N: i32>(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vdupq_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } +pub fn vdup_laneq_s8<const N: i32>(a: int8x16_t) -> int8x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { simd_shuffle!(a, a, [N as u32; 8]) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11311,19 +11963,27 @@ pub fn vdupq_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_xor(a, b) } +pub fn vdup_laneq_s8<const N: i32>(a: int8x16_t) -> int8x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = simd_shuffle!(a, a, [N as u32; 8]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11332,19 +11992,22 @@ pub fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe { simd_xor(a, b) } +pub fn vdup_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { simd_shuffle!(a, a, [N as u32; 8]) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11353,19 +12016,27 @@ pub fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_xor(a, b) } +pub fn vdup_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = simd_shuffle!(a, a, [N as u32; 8]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } }
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11374,19 +12045,22 @@ pub fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_xor(a, b) } +pub fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { simd_shuffle!(a, a, [N as u32; 16]) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11395,19 +12069,31 @@ pub fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { simd_xor(a, b) } +pub fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = simd_shuffle!(a, a, [N as u32; 16]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0") @@ -11416,19 +12102,22 @@ pub fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_xor(a, b) } +pub fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { simd_shuffle!(a, a, [N as u32; 16]) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11437,19 +12126,31 @@ pub fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe { simd_xor(a, b) } +pub fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = simd_shuffle!(a, a, [N as u32; 16]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11458,19 +12159,22 @@ pub fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe { simd_xor(a, b) } +pub fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { simd_shuffle!(a, a, [N as u32; 16]) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] #[inline] 
+#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup, N = 8) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11479,19 +12183,30 @@ pub fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_xor(a, b) } +pub fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = simd_shuffle!(a, a, [N as u32; 16]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(nop, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11500,19 +12215,21 @@ pub fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { simd_xor(a, b) } +pub fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { + static_assert_uimm_bits!(N, 1); + unsafe { transmute(vgetq_lane_s64::(a)) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(nop, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11521,39 +12238,51 @@ pub fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_xor(a, b) } +pub fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { + 
-#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(nop, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11521,39 +12238,51 @@ pub fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_xor(a, b) } +pub fn vdup_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 1); + unsafe { transmute(vgetq_lane_u64::<N>(a)) } } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] +#[doc = "Create a new vector with all lanes set to a value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(dup) )] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vdup_n_f16(a: f16) -> float16x4_t { + float16x4_t::splat(a) +} +#[doc = "Create a new vector with all lanes set to a value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) )] -pub fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_xor(a, b) } +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vdupq_n_f16(a: f16) -> float16x8_t { + float16x8_t::splat(a) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -11563,18 +12292,18 @@ pub fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_xor(a, b) } +pub fn vdup_n_f32(value: f32) -> float32x2_t { + float32x2_t::splat(value) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test,
target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -11584,18 +12313,18 @@ pub fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_xor(a, b) } +pub fn vdup_n_p16(value: p16) -> poly16x4_t { + poly16x4_t::splat(value) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u64)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -11605,18 +12334,18 @@ pub fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe { simd_xor(a, b) } +pub fn vdup_n_p8(value: p8) -> poly8x8_t { + poly8x8_t::splat(value) } -#[doc = "Vector bitwise exclusive or (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(eor) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -11626,44 +12355,40 @@ pub fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe { simd_xor(a, b) } +pub fn vdup_n_s16(value: i16) -> int16x4_t { + int16x4_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s32)"] #[inline] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] -#[target_feature(enable 
= "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vext_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } +pub fn vdup_n_s32(value: i32) -> int32x2_t { + int32x2_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(fmov) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11672,21 +12397,19 @@ pub fn vext_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1]) } +pub fn vdup_n_s64(value: i64) -> int64x1_t { + int64x1_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11695,21 +12418,19 @@ pub fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1]) } +pub fn vdup_n_s8(value: i8) -> int8x8_t { + int8x8_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vext.8", N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11718,23 +12439,19 @@ pub fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1]) } +pub fn vdup_n_u16(value: u16) -> uint16x4_t { + uint16x4_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 0) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11743,23 +12460,19 @@ pub fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { - static_assert!(N == 0); - a +pub fn vdup_n_u32(value: u32) -> uint32x2_t { + uint32x2_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, N = 0) + assert_instr(fmov) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11768,21 +12481,19 @@ pub unsafe fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vext_u64(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t { - static_assert!(N == 0); - a +pub fn vdup_n_u64(value: u64) -> uint64x1_t { + uint64x1_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11791,36 +12502,19 @@ pub unsafe fn vext_u64(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - b, - [ - N as u32, - N as u32 + 1, - N as u32 + 2, - N as u32 + 3, - N as u32 + 4, - N as u32 + 5, - N as u32 + 6, - N as u32 + 7 - ] - ) - } +pub fn vdup_n_u8(value: u8) -> uint8x8_t { + uint8x8_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11829,36 +12523,19 @@ pub fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - b, - [ - N as u32, - N as u32 + 1, - N as u32 + 2, - N as u32 + 3, - N as u32 + 4, - N as u32 + 5, - N as u32 + 6, - N as u32 + 7 - ] - ) - } +pub fn vdupq_n_f32(value: f32) -> float32x4_t { + float32x4_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11867,36 +12544,19 @@ pub fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_u8(a: uint8x8_t, b: 
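Note: the vext_* intrinsics removed from this region take an 8-lane window out of the 16-lane concatenation a ++ b, starting at lane N, which is exactly what their long simd_shuffle! index lists encode. An array sketch of the semantics (names are ours):

    // Result lane i is (a ++ b)[N + i]; N = 0 returns a, N = 8 returns b.
    fn ext_u8x8<const N: usize>(a: [u8; 8], b: [u8; 8]) -> [u8; 8] {
        let mut cat = [0u8; 16];
        cat[..8].copy_from_slice(&a);
        cat[8..].copy_from_slice(&b);
        let mut out = [0u8; 8];
        out.copy_from_slice(&cat[N..N + 8]);
        out
    }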
uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - b, - [ - N as u32, - N as u32 + 1, - N as u32 + 2, - N as u32 + 3, - N as u32 + 4, - N as u32 + 5, - N as u32 + 6, - N as u32 + 7 - ] - ) - } +pub fn vdupq_n_p16(value: p16) -> poly16x8_t { + poly16x8_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11905,36 +12565,19 @@ pub fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - b, - [ - N as u32, - N as u32 + 1, - N as u32 + 2, - N as u32 + 3, - N as u32 + 4, - N as u32 + 5, - N as u32 + 6, - N as u32 + 7 - ] - ) - } +pub fn vdupq_n_p8(value: p8) -> poly8x16_t { + poly8x16_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11943,36 +12586,19 @@ pub fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - b, - [ - N as u32, - N as u32 + 1, - N as u32 + 2, - N as u32 + 3, - N as u32 + 4, - N as u32 + 5, - N as u32 + 6, - N as u32 + 7 - ] - ) - } +pub fn vdupq_n_s16(value: i16) -> int16x8_t { + int16x8_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -11981,75 +12607,40 @@ pub fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - b, - [ - N as u32, - N as u32 + 1, - N as u32 + 2, - N as u32 + 3, - N as u32 + 4, - N as u32 + 5, - N as u32 + 6, - N as u32 + 7 - ] - ) - } +pub fn vdupq_n_s32(value: i32) -> int32x4_t { + int32x4_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s64)"] #[inline] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 7) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vextq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { - simd_shuffle!( - a, - b, - [ - N as u32, - N as u32 + 1, - N as u32 + 2, - N as u32 + 3, - N as u32 + 4, - N as u32 + 5, - N as u32 + 6, - N as u32 + 7 - ] - ) - } +pub fn vdupq_n_s64(value: i64) -> int64x2_t { + int64x2_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12058,21 +12649,19 @@ pub fn vextq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } +pub fn 
vdupq_n_s8(value: i8) -> int8x16_t { + int8x16_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12081,21 +12670,19 @@ pub fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } +pub fn vdupq_n_u16(value: u16) -> uint16x8_t { + uint16x8_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12104,21 +12691,19 @@ pub fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } +pub fn vdupq_n_u32(value: u32) -> uint32x4_t { + uint32x4_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12127,21 +12712,19 @@ pub fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { 
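The replacement bodies above reduce every `vdup*_n_*` intrinsic to a `splat` of the scalar argument. A minimal sketch of the observable behaviour, assuming an aarch64 host and the public `core::arch::aarch64` API; the module and test names here are hypothetical and not part of the patch:

#[cfg(all(test, target_arch = "aarch64"))]
mod dup_sketch {
    use core::arch::aarch64::vdupq_n_u8;

    #[test]
    fn vdupq_n_u8_fills_every_lane() {
        // `unsafe` kept for toolchains where the NEON intrinsics are still
        // `unsafe fn`; the transmute just reinterprets the 128-bit vector
        // as its 16 byte lanes.
        let v = unsafe { vdupq_n_u8(0x2A) };
        let lanes: [u8; 16] = unsafe { core::mem::transmute(v) };
        assert!(lanes.iter().all(|&b| b == 0x2A));
    }
}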
target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } +pub fn vdupq_n_u64(value: u64) -> uint64x2_t { + uint64x2_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12150,21 +12733,19 @@ pub fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } +pub fn vdupq_n_u8(value: u8) -> uint8x16_t { + uint8x16_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f32_vfp4)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 3) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12173,21 +12754,19 @@ pub fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(N, 2); - unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } +fn vdup_n_f32_vfp4(value: f32) -> float32x2_t { + float32x2_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f32_vfp4)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(target_arch = "arm", 
target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(dup) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12196,21 +12775,21 @@ pub fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1]) } +fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t { + float32x4_t::splat(value) } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 1) + assert_instr(dup, N = 0) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12219,21 +12798,22 @@ pub fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 1); - unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1]) } +pub fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { + static_assert!(N == 0); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(dup, N = 0) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12242,44 +12822,25 @@ pub fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 4); +pub fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { + static_assert!(N == 0); unsafe { - simd_shuffle!( - a, - b, - [ - N as u32, - N as u32 + 1, - N as u32 + 2, - N as u32 + 3, - N as u32 + 4, - N as u32 + 
5, - N as u32 + 6, - N as u32 + 7, - N as u32 + 8, - N as u32 + 9, - N as u32 + 10, - N as u32 + 11, - N as u32 + 12, - N as u32 + 13, - N as u32 + 14, - N as u32 + 15 - ] - ) + let ret_val: int64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(dup, N = 0) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12288,44 +12849,22 @@ pub fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 4); - unsafe { - simd_shuffle!( - a, - b, - [ - N as u32, - N as u32 + 1, - N as u32 + 2, - N as u32 + 3, - N as u32 + 4, - N as u32 + 5, - N as u32 + 6, - N as u32 + 7, - N as u32 + 8, - N as u32 + 9, - N as u32 + 10, - N as u32 + 11, - N as u32 + 12, - N as u32 + 13, - N as u32 + 14, - N as u32 + 15 - ] - ) - } +pub fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { + static_assert!(N == 0); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } -#[doc = "Extract vector from pair of vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext, N = 15) + assert_instr(dup, N = 0) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12334,87 +12873,77 @@ pub fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(N, 4); +pub fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { + static_assert!(N == 0); unsafe { - simd_shuffle!( - a, - b, - [ - N as u32, - N as u32 + 1, - N as u32 + 2, - N as u32 + 3, - N as u32 + 4, - N as u32 + 5, - N as u32 + 6, - N as u32 + 7, - N as u32 + 8, - N as u32 + 9, - N as u32 + 10, - N as u32 + 11, - N as u32 + 12, - N as u32 + 13, - N as u32 + 14, - N as u32 + 15 - ] - ) + let ret_val: uint64x2_t = 
simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } -#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(dup, N = 1) )] -#[target_feature(enable = "neon,fp16")] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vfma_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { - unsafe { simd_fma(b, c, a) } +pub fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } -#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f16)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(dup, N = 1) )] -#[target_feature(enable = "neon,fp16")] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vfmaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { - unsafe { simd_fma(b, c, a) } -} -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] -#[inline] +pub fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } +} +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] +#[inline] +#[cfg(target_endian = "little")] 
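The `target_endian = "big"` duplicates of `vdupq_lane_s64`/`vdupq_lane_u64` above bracket the core `simd_shuffle!` with a pair of lane reversals, so lane `N` keeps its little-endian meaning regardless of the target's in-memory lane order. A scalar model of that pattern, using plain arrays in place of NEON vectors (all names here are illustrative only, not from the patch):

fn rev2(v: [u64; 2]) -> [u64; 2] {
    [v[1], v[0]]
}

// Models the big-endian body of `vdupq_laneq_s64`: reverse into
// little-endian lane order, broadcast lane N, then reverse back.
// N must be 0 or 1, mirroring `static_assert_uimm_bits!(N, 1)`.
fn dupq_laneq_model<const N: usize>(a: [u64; 2]) -> [u64; 2] {
    let a = rev2(a); // undo the big-endian lane order
    let ret = [a[N], a[N]]; // the actual shuffle step
    rev2(ret) // restore the target's lane order
}

fn main() {
    // [20, 10] stores the logical vector [10, 20] in big-endian lane
    // order; lane 1 in little-endian numbering is 20.
    assert_eq!(dupq_laneq_model::<1>([20, 10]), [20, 20]);
}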
#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12423,19 +12952,22 @@ pub fn vfmaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe { simd_fma(b, c, a) } +pub fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) } } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] +#[doc = "Set all vector lanes to the same value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(dup, N = 1) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12444,18 +12976,23 @@ pub fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe { simd_fma(b, c, a) } +pub fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint64x2_t = simd_shuffle!(a, a, [N as u32, N as u32]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -12465,18 +13002,18 @@ pub fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vfma_f32(a, b, vdup_n_f32_vfp4(c)) +pub fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_xor(a, b) } } -#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmla) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -12486,70 +13023,60 @@ pub fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)) +pub fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { simd_xor(a, b) } } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f16)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(eor) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vfms_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { - unsafe { - let b: float16x4_t = simd_neg(b); - vfma_f16(a, b, c) - } +pub fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_xor(a, b) } } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f16)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] 
#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(eor) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vfmsq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { - unsafe { - let b: float16x8_t = simd_neg(b); - vfmaq_f16(a, b, c) - } +pub fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_xor(a, b) } } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -12559,21 +13086,18 @@ pub fn vfmsq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe { - let b: float32x2_t = simd_neg(b); - vfma_f32(a, b, c) - } +pub fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_xor(a, b) } } -#[doc = "Floating-point fused multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -12583,21 +13107,18 @@ pub fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe { - let b: float32x4_t = simd_neg(b); - vfmaq_f32(a, b, c) - } +pub fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { simd_xor(a, b) } } -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s64)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -12607,18 +13128,18 @@ pub fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vfms_f32(a, b, vdup_n_f32_vfp4(c)) +pub fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { simd_xor(a, b) } } -#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmls) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -12628,56 +13149,60 @@ pub fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) +pub fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { simd_xor(a, b) } } -#[doc = "Duplicate vector element to vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f16)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(nop))] -pub fn vget_high_f16(a: float16x8_t) -> float16x4_t { - unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } +pub fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_xor(a, b) } } -#[doc = "Duplicate vector element to vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f16)"] +#[doc = 
"Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(eor) +)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(nop))] -pub fn vget_low_f16(a: float16x8_t) -> float16x4_t { - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } +pub fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_xor(a, b) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f32)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -12687,18 +13212,18 @@ pub fn vget_low_f16(a: float16x8_t) -> float16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_high_f32(a: float32x4_t) -> float32x2_t { - unsafe { simd_shuffle!(a, a, [2, 3]) } +pub fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_xor(a, b) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p16)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -12708,18 +13233,18 @@ pub fn vget_high_f32(a: float32x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { - unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } +pub fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_xor(a, b) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p8)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -12729,18 +13254,18 @@ pub fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } +pub fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_xor(a, b) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s16)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -12750,18 +13275,18 @@ pub fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_high_s16(a: int16x8_t) -> int16x4_t { - unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } +pub fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_xor(a, b) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s32)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -12771,18 +13296,18 @@ pub fn vget_high_s16(a: int16x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_high_s32(a: int32x4_t) -> int32x2_t { - unsafe { simd_shuffle!(a, a, [2, 3]) } +pub fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { simd_xor(a, b) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s8)"] +#[doc = "Vector bitwise exclusive or (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(eor) )] #[cfg_attr( not(target_arch = "arm"), @@ -12792,61 +13317,77 @@ pub fn vget_high_s32(a: int32x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_high_s8(a: int8x16_t) -> int8x8_t { - unsafe { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } +pub fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { simd_xor(a, b) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { - unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vext_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(ext, N = 3) )] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { - unsafe { simd_shuffle!(a, a, [2, 3]) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vext_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float16x4_t = + simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as 
u32 + 2, N as u32 + 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12855,19 +13396,22 @@ pub fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } +pub fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p64)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12876,19 +13420,27 @@ pub fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_high_p64(a: poly64x2_t) -> poly64x1_t { - unsafe { transmute(u64x1::new(simd_extract!(a, 1))) } +pub fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float32x2_t = simd_shuffle!(a, b, [N as u32, N as u32 + 1]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s64)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12897,19 +13449,22 @@ pub fn vget_high_p64(a: poly64x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_high_s64(a: int64x2_t) -> int64x1_t { - unsafe { int64x1_t([simd_extract!(a, 1)]) } +pub fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u64)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ext) + assert_instr(ext, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12918,52 +13473,51 @@ pub fn vget_high_s64(a: int64x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { - unsafe { uint64x1_t([simd_extract!(a, 1)]) } +pub fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int32x2_t = simd_shuffle!(a, b, [N as u32, N as u32 + 1]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Duplicate vector element to scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_f16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ext, N = 1) )] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vget_lane_f16(a: float16x4_t) -> f16 { - static_assert_uimm_bits!(LANE, 2); - unsafe { simd_extract!(a, LANE as u32) } +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { 
simd_shuffle!(a, b, [N as u32, N as u32 + 1]) } } -#[doc = "Duplicate vector element to scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ext, N = 1) )] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vgetq_lane_f16(a: float16x8_t) -> f16 { - static_assert_uimm_bits!(LANE, 3); - unsafe { simd_extract!(a, LANE as u32) } -} -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_f32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12972,17 +13526,28 @@ pub fn vgetq_lane_f16(a: float16x8_t) -> f16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_lane_f32(v: float32x2_t) -> f32 { - static_assert_uimm_bits!(IMM5, 1); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = simd_shuffle!(a, b, [N as u32, N as u32 + 1]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, N = 0) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -12991,17 +13556,23 @@ pub fn vget_lane_f32(v: float32x2_t) -> f32 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_lane_p16(v: poly16x4_t) -> p16 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } +pub unsafe fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { + static_assert!(N == 0); + a } -#[doc 
= "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, N = 0) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13010,17 +13581,22 @@ pub fn vget_lane_p16(v: poly16x4_t) -> p16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_lane_p8(v: poly8x8_t) -> p8 { - static_assert_uimm_bits!(IMM5, 3); - unsafe { simd_extract!(v, IMM5 as u32) } +pub unsafe fn vext_u64(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t { + static_assert!(N == 0); + a } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13029,17 +13605,37 @@ pub fn vget_lane_p8(v: poly8x8_t) -> p8 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_lane_s16(v: int16x4_t) -> i16 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0") @@ -13048,17 +13644,40 @@ pub fn vget_lane_s16(v: int16x4_t) -> i16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_lane_s32(v: int32x2_t) -> i32 { - static_assert_uimm_bits!(IMM5, 1); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13067,17 +13686,37 @@ pub fn vget_lane_s32(v: int32x2_t) -> i32 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_lane_s8(v: int8x8_t) -> i8 { - static_assert_uimm_bits!(IMM5, 3); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13086,17 +13725,40 @@ pub fn vget_lane_s8(v: int8x8_t) -> i8 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_lane_u16(v: uint16x4_t) -> u16 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: int16x8_t = simd_shuffle!(a, 
a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13105,17 +13767,37 @@ pub fn vget_lane_u16(v: uint16x4_t) -> u16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_lane_u32(v: uint32x2_t) -> u32 { - static_assert_uimm_bits!(IMM5, 1); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13124,17 +13806,40 @@ pub fn vget_lane_u32(v: uint32x2_t) -> u32 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_lane_u8(v: uint8x8_t) -> u8 { - static_assert_uimm_bits!(IMM5, 3); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13143,17 +13848,37 @@ pub fn vget_lane_u8(v: uint8x8_t) -> u8 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vgetq_lane_f32(v: float32x4_t) -> f32 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13162,17 +13887,40 @@ pub fn vgetq_lane_f32(v: float32x4_t) -> f32 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vgetq_lane_p16(v: poly16x8_t) -> p16 { - static_assert_uimm_bits!(IMM5, 3); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p64)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] 
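
// Illustrative sketch -- ours, not part of the generated diff. Every `vext*`
// body above computes lane i of the result as lane N+i of the concatenation
// a:b; the big-endian variants additionally reverse each input's lanes, run
// the same shuffle, and reverse the result, so the extraction still happens
// in architectural lane order. A scalar model of that semantics, assuming
// the usual EXT definition (`ext_model` is a hypothetical helper):
fn ext_model<const N: usize>(a: [u16; 4], b: [u16; 4]) -> [u16; 4] {
    let mut out = [0u16; 4];
    for i in 0..4 {
        // Lane i comes from `a` while N + i stays in range, then from `b`.
        out[i] = if N + i < 4 { a[N + i] } else { b[N + i - 4] };
    }
    out
}

fn ext_model_demo() {
    // With N = 1 the result drops a[0] and pulls in b[0] at the top.
    assert_eq!(ext_model::<1>([0, 1, 2, 3], [4, 5, 6, 7]), [1, 2, 3, 4]);
}
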
-#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13181,17 +13929,37 @@ pub fn vgetq_lane_p16(v: poly16x8_t) -> p16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vgetq_lane_p64(v: poly64x2_t) -> p64 { - static_assert_uimm_bits!(IMM5, 1); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13200,17 +13968,40 @@ pub fn vgetq_lane_p64(v: poly64x2_t) -> p64 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vgetq_lane_p8(v: poly8x16_t) -> p8 { - static_assert_uimm_bits!(IMM5, 4); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13219,17 +14010,37 @@ pub fn vgetq_lane_p8(v: poly8x16_t) -> p8 { target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vgetq_lane_s16(v: int16x8_t) -> i16 { - static_assert_uimm_bits!(IMM5, 3); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13238,55 +14049,123 @@ pub fn vgetq_lane_s16(v: int16x8_t) -> i16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vgetq_lane_s32(v: int32x4_t) -> i32 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s64)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vgetq_lane_s64(v: int64x2_t) -> i64 { - static_assert_uimm_bits!(IMM5, 1); - unsafe { simd_extract!(v, IMM5 as u32) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vextq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + simd_shuffle!( + a, + b, + [ + N as u32, + N 
as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 7) +)] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vgetq_lane_s8(v: int8x16_t) -> i8 { - static_assert_uimm_bits!(IMM5, 4); - unsafe { simd_extract!(v, IMM5 as u32) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vextq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7 + ] + ); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13295,17 +14174,22 @@ pub fn vgetq_lane_s8(v: int8x16_t) -> i8 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vgetq_lane_u16(v: uint16x8_t) -> u16 { - static_assert_uimm_bits!(IMM5, 3); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13314,17 +14198,28 @@ pub fn vgetq_lane_u16(v: uint16x8_t) -> u16 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vgetq_lane_u32(v: uint32x4_t) -> u32 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float32x4_t = + simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u64)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13333,17 +14228,22 @@ pub fn vgetq_lane_u32(v: uint32x4_t) -> u32 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vgetq_lane_u64(v: uint64x2_t) -> u64 { - static_assert_uimm_bits!(IMM5, 2); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13352,17 
+14252,28 @@ pub fn vgetq_lane_u64(v: uint64x2_t) -> u64 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vgetq_lane_u8(v: uint8x16_t) -> u8 { - static_assert_uimm_bits!(IMM5, 4); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int16x4_t = + simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p64)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13371,17 +14282,22 @@ pub fn vgetq_lane_u8(v: uint8x16_t) -> u8 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_lane_p64(v: poly64x1_t) -> p64 { - static_assert!(IMM5 == 0); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } } -#[doc = "Move vector element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s64)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13390,17 +14306,28 @@ pub fn vget_lane_p64(v: poly64x1_t) -> p64 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_lane_s64(v: int64x1_t) -> i64 { - static_assert!(IMM5 == 0); - unsafe { simd_extract!(v, IMM5 as u32) } +pub fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int32x4_t = + simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Move vector 
element to general-purpose register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u64)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(1)] -#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13409,16 +14336,22 @@ pub fn vget_lane_s64(v: int64x1_t) -> i64 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_lane_u64(v: uint64x1_t) -> u64 { - static_assert!(IMM5 == 0); - unsafe { simd_extract!(v, 0) } +pub fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13427,15 +14360,28 @@ pub fn vget_lane_u64(v: uint64x1_t) -> u64 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_low_f32(a: float32x4_t) -> float32x2_t { - unsafe { simd_shuffle!(a, a, [0, 1]) } +pub fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = + simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13444,15 +14390,22 @@ pub fn vget_low_f32(a: float32x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } +pub fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13461,15 +14414,28 @@ pub fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } +pub fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint32x4_t = + simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13478,15 +14444,22 @@ pub fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_low_s16(a: int16x8_t) -> int16x4_t { - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } +pub fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s32)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 3) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13495,15 +14468,28 @@ pub fn vget_low_s16(a: int16x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_low_s32(a: int32x4_t) -> int32x2_t { - unsafe { simd_shuffle!(a, a, [0, 1]) } +pub fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 2); + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: poly16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: poly16x4_t = + simd_shuffle!(a, b, [N as u32, N as u32 + 1, N as u32 + 2, N as u32 + 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 1) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13512,15 +14498,22 @@ pub fn vget_low_s32(a: int32x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_low_s8(a: int8x16_t) -> int8x8_t { - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } +pub fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1]) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ext, N = 1) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13529,15 +14522,27 @@ pub fn vget_low_s8(a: int8x16_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { - unsafe { simd_shuffle!(a, a, 
[0, 1, 2, 3]) }
+pub fn vextq_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe {
+        let a: int64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: int64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: int64x2_t = simd_shuffle!(a, b, [N as u32, N as u32 + 1]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
 }
-#[doc = "Duplicate vector element to vector or scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u32)"]
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ext, N = 1)
+)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -13546,15 +14551,22 @@ pub fn vget_low_u16(a: uint16x8_t) -> uint16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vget_low_u32(a: uint32x4_t) -> uint32x2_t {
-    unsafe { simd_shuffle!(a, a, [0, 1]) }
+pub fn vextq_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe { simd_shuffle!(a, b, [N as u32, N as u32 + 1]) }
 }
-#[doc = "Duplicate vector element to vector or scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u8)"]
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ext, N = 1)
+)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -13563,15 +14575,27 @@ pub fn vget_low_u32(a: uint32x4_t) -> uint32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vget_low_u8(a: uint8x16_t) -> uint8x8_t {
-    unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) }
+pub fn vextq_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
+    static_assert_uimm_bits!(N, 1);
+    unsafe {
+        let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: uint64x2_t = simd_shuffle!(a, b, [N as u32, N as u32 + 1]);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
+    }
 }
-#[doc = "Duplicate vector element to vector or scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p64)"]
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(nop))]
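
// Usage sketch -- ours, not generated -- assuming an aarch64 target where the
// public core::arch::aarch64 API is available. With two 64-bit lanes, N is a
// single bit: N = 0 returns `a` unchanged, and N = 1 pairs the high lane of
// `a` with the low lane of `b` (32-bit Arm lowers this to `vmov`s, hence the
// assert_instr(vmov) above, while AArch64 still emits `ext`).
#[cfg(target_arch = "aarch64")]
fn vextq_u64_demo() {
    use core::arch::aarch64::*;
    unsafe {
        let a = vld1q_u64([1u64, 2].as_ptr());
        let b = vld1q_u64([3u64, 4].as_ptr());
        let r = vextq_u64::<1>(a, b);
        assert_eq!(vgetq_lane_u64::<0>(r), 2);
        assert_eq!(vgetq_lane_u64::<1>(r), 3);
    }
}
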
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ext, N = 15)
+)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -13580,15 +14604,45 @@ pub fn vget_low_u8(a: uint8x16_t) -> uint8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vget_low_p64(a: poly64x2_t) -> poly64x1_t {
-    unsafe { transmute(u64x1::new(simd_extract!(a, 0))) }
+pub fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    static_assert_uimm_bits!(N, 4);
+    unsafe {
+        simd_shuffle!(
+            a,
+            b,
+            [
+                N as u32,
+                N as u32 + 1,
+                N as u32 + 2,
+                N as u32 + 3,
+                N as u32 + 4,
+                N as u32 + 5,
+                N as u32 + 6,
+                N as u32 + 7,
+                N as u32 + 8,
+                N as u32 + 9,
+                N as u32 + 10,
+                N as u32 + 11,
+                N as u32 + 12,
+                N as u32 + 13,
+                N as u32 + 14,
+                N as u32 + 15
+            ]
+        )
+    }
 }
-#[doc = "Duplicate vector element to vector or scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s64)"]
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ext, N = 15)
+)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -13597,15 +14651,54 @@ pub fn vget_low_p64(a: poly64x2_t) -> poly64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vget_low_s64(a: int64x2_t) -> int64x1_t {
-    unsafe { int64x1_t([simd_extract!(a, 0)]) }
+pub fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    static_assert_uimm_bits!(N, 4);
+    unsafe {
+        let a: int8x16_t =
+            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: int8x16_t =
+            simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: int8x16_t = simd_shuffle!(
+            a,
+            b,
+            [
+                N as u32,
+                N as u32 + 1,
+                N as u32 + 2,
+                N as u32 + 3,
+                N as u32 + 4,
+                N as u32 + 5,
+                N as u32 + 6,
+                N as u32 + 7,
+                N as u32 + 8,
+                N as u32 + 9,
+                N as u32 + 10,
+                N as u32 + 11,
+                N as u32 + 12,
+                N as u32 + 13,
+                N as u32 + 14,
+                N as u32 + 15
+            ]
        );
+        simd_shuffle!(
+            ret_val,
+            ret_val,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        )
+    }
 }
-#[doc = "Duplicate vector element to vector or scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u64)"]
+#[doc = "Extract vector from pair of vectors"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ext, N = 15)
+)]
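
// Side note (ours): each 16-lane body opens with static_assert_uimm_bits!(N, 4),
// which restricts N to four unsigned bits, i.e. 0..=15, at compile time. A
// rough, self-contained sketch of how such a bound can be expressed -- the
// names here are hypothetical, not the macro stdarch actually defines:
struct AssertUimmBits<const N: i32, const BITS: u32>;

impl<const N: i32, const BITS: u32> AssertUimmBits<N, BITS> {
    // Evaluating this associated const aborts compilation for out-of-range N.
    const OK: () = assert!(N >= 0 && (N as u32) < (1u32 << BITS));
}

fn ext_index_checked<const N: i32>() {
    let () = AssertUimmBits::<N, 4>::OK; // forces the check per instantiation
}

fn ext_index_demo() {
    ext_index_checked::<15>(); // compiles
    // ext_index_checked::<16>(); // would be rejected at compile time
}
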
+#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13614,19 +14707,45 @@ pub fn vget_low_s64(a: int64x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { - unsafe { uint64x1_t([simd_extract!(a, 0)]) } +pub fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7, + N as u32 + 8, + N as u32 + 9, + N as u32 + 10, + N as u32 + 11, + N as u32 + 12, + N as u32 + 13, + N as u32 + 14, + N as u32 + 15 + ] + ) + } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(ext, N = 15) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13635,27 +14754,54 @@ pub fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i8")] - fn _vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; +pub fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7, + N as u32 + 8, + N as u32 + 9, + N as u32 + 10, + N as u32 + 11, + N as u32 + 12, + N as u32 + 13, + N as u32 + 14, + N as u32 + 15 + ] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } - unsafe { _vhadd_s8(a, b) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr( all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(ext, N = 15) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13664,27 +14810,45 @@ pub fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v16i8")] - fn _vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; +pub fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7, + N as u32 + 8, + N as u32 + 9, + N as u32 + 10, + N as u32 + 11, + N as u32 + 12, + N as u32 + 13, + N as u32 + 14, + N as u32 + 15 + ] + ) } - unsafe { _vhaddq_s8(a, b) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(ext, N = 15) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -13693,84 +14857,95 @@ pub fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i16")] - fn _vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; +pub fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 4); + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = simd_shuffle!( + a, + b, + [ + N as u32, + N as u32 + 1, + N as u32 + 2, + N as u32 + 3, + N as u32 + 4, + N as u32 + 5, + N as u32 + 6, + N as u32 + 7, + N as u32 + 8, + N as u32 + 9, + N as u32 + 10, + N as u32 + 11, + N as u32 + 12, + N as u32 + 13, + N as u32 + 14, + N as u32 + 15 + ] + ); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } - unsafe { _vhadd_s16(a, b) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] +#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(fmla) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i16")] - fn _vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - unsafe { _vhaddq_s16(a, b) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vfma_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { + unsafe { simd_fma(b, c, a) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] +#[doc = "Floating-point fused Multiply-Add to accumulator (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(fmla) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v2i32")] - fn _vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - unsafe { _vhadd_s32(a, b) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vfmaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { + unsafe { simd_fma(b, c, a) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] 
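
// Note the argument order in the bodies above: vfma_f16(a, b, c) returns
// a + b * c, so it forwards to simd_fma(b, c, a). Fusing rounds once, which
// is observable; a scalar sketch (ours) using the stable f32::mul_add:
fn fused_vs_unfused_demo() {
    let a = -1.0f32;
    let b = 1.0f32 + f32::EPSILON;
    let c = 1.0f32 - f32::EPSILON;
    let fused = b.mul_add(c, a); // single rounding of b*c + a = -2^-46
    let unfused = b * c + a;     // b*c first rounds to 1.0, so this is 0.0
    assert_eq!(unfused, 0.0);
    assert_ne!(fused, 0.0);
}
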
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shadd) + assert_instr(fmla) )] #[cfg_attr( not(target_arch = "arm"), @@ -13780,26 +14955,18 @@ pub fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i32")] - fn _vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - unsafe { _vhaddq_s32(a, b) } +pub fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe { simd_fma(b, c, a) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(fmla) )] #[cfg_attr( not(target_arch = "arm"), @@ -13809,26 +14976,18 @@ pub fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")] - fn _vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - } - unsafe { _vhadd_u8(a, b) } +pub fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe { simd_fma(b, c, a) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(fmla) )] #[cfg_attr( not(target_arch = "arm"), @@ -13838,26 +14997,18 @@ pub fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = 
"llvm.aarch64.neon.uhadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v16i8")] - fn _vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - } - unsafe { _vhaddq_u8(a, b) } +pub fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vfma_f32(a, b, vdup_n_f32_vfp4(c)) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] +#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(fmla) )] #[cfg_attr( not(target_arch = "arm"), @@ -13867,84 +15018,70 @@ pub fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i16")] - fn _vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - } - unsafe { _vhadd_u16(a, b) } +pub fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)) } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(fmls) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")] - fn _vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; +#[cfg(not(target_arch = "arm64ec"))] +pub fn vfms_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { + unsafe { + let b: float16x4_t = simd_neg(b); + vfma_f16(a, b, c) } - unsafe { _vhaddq_u16(a, b) } } -#[doc = "Halving add"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(fmls) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")] - fn _vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; +#[cfg(not(target_arch = "arm64ec"))] +pub fn vfmsq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { + unsafe { + let b: float16x8_t = simd_neg(b); + vfmaq_f16(a, b, c) } - unsafe { _vhadd_u32(a, b) } } -#[doc = "Halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhadd) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -13954,26 +15091,21 @@ pub fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")] - fn _vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; +pub fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe { + let b: float32x2_t = simd_neg(b); + vfma_f32(a, b, c) } - unsafe { _vhaddq_u32(a, b) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] +#[doc = "Floating-point fused multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] #[inline] #[target_feature(enable = "neon")] 
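[Editor's note] The vfms_* bodies above reuse the fma path by negating the multiplicand: per lane, a - b * c == fma(-b, c, a) holds exactly, since IEEE-754 negation only flips the sign bit and the product still enters the add unrounded. A scalar sketch of the identity (illustrative only):

    // a - b * c as a fused multiply-add with a negated multiplicand,
    // mirroring the simd_neg + vfma_* pattern in the generated bodies.
    fn fms_lane(a: f32, b: f32, c: f32) -> f32 {
        (-b).mul_add(c, a)
    }

    fn main() {
        assert_eq!(fms_lane(10.0, 2.0, 3.0), 4.0); // 10.0 - 2.0 * 3.0
    }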
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -13983,26 +15115,21 @@ pub fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i16")] - fn _vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; +pub fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe { + let b: float32x4_t = simd_neg(b); + vfmaq_f32(a, b, c) } - unsafe { _vhsub_s16(a, b) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] +#[doc = "Floating-point fused Multiply-subtract to accumulator (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -14012,26 +15139,18 @@ pub fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i16")] - fn _vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - unsafe { _vhsubq_s16(a, b) } +pub fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vfms_f32(a, b, vdup_n_f32_vfp4(c)) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] +#[doc = "Floating-point fused Multiply-subtract to accumulator (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) + assert_instr(fmls) )] #[cfg_attr( not(target_arch = "arm"), @@ -14041,142 +15160,107 @@ pub fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch 
= "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v2i32")] - fn _vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - unsafe { _vhsub_s32(a, b) } +pub fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] +#[doc = "Duplicate vector element to vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) -)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i32")] - fn _vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - unsafe { _vhsubq_s32(a, b) } +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(nop))] +pub fn vget_high_f16(a: float16x8_t) -> float16x4_t { + unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] +#[doc = "Duplicate vector element to vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) -)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i8")] - fn _vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(nop))] +pub fn vget_high_f16(a: float16x8_t) -> float16x4_t { + unsafe { + let a: 
float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vhsub_s8(a, b) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] +#[doc = "Duplicate vector element to vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shsub) -)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.shsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v16i8")] - fn _vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - } - unsafe { _vhsubq_s8(a, b) } +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(nop))] +pub fn vget_low_f16(a: float16x8_t) -> float16x4_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] +#[doc = "Duplicate vector element to vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) -)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")] - fn _vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(nop))] +pub fn vget_low_f16(a: float16x8_t) -> float16x4_t { + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vhsub_u8(a, b) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] +#[doc = "Duplicate 
vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14186,26 +15270,19 @@ pub fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")] - fn _vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - } - unsafe { _vhsubq_u8(a, b) } +pub fn vget_high_f32(a: float32x4_t) -> float32x2_t { + unsafe { simd_shuffle!(a, a, [2, 3]) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14215,26 +15292,23 @@ pub fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")] - fn _vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; +pub fn vget_high_f32(a: float32x4_t) -> float32x2_t { + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x2_t = simd_shuffle!(a, a, [2, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) } - unsafe { _vhsub_u16(a, b) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ext) )] 
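[Editor's note] In the target_endian = "big" variants above, the little-endian lane selection is wrapped in two full reversals: reverse the input, apply the same mask, reverse the result. Chained simd_shuffle! masks compose by index lookup, so the net selection is easy to sanity-check outside the crate; a small stand-alone helper (hypothetical, not part of the patch):

    // shuffle(shuffle(a, m1), m2) picks a[m1[m2[i]]] at position i,
    // so chained masks compose like this:
    fn chain(m1: &[usize], m2: &[usize]) -> Vec<usize> {
        m2.iter().map(|&i| m1[i]).collect()
    }

    fn main() {
        let rev8: Vec<usize> = (0..8).rev().collect(); // [7, 6, ..., 0]
        let net = chain(&chain(&rev8, &[4, 5, 6, 7]), &[3, 2, 1, 0]);
        // Net mask of the big-endian vget_high_f16 body: the opposite
        // half from the little-endian [4, 5, 6, 7], consistent with the
        // reversed lane numbering stdarch uses on big-endian targets.
        assert_eq!(net, [0, 1, 2, 3]);
    }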
#[cfg_attr( not(target_arch = "arm"), @@ -14244,26 +15318,19 @@ pub fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")] - fn _vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - } - unsafe { _vhsubq_u16(a, b) } +pub fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { + unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14273,26 +15340,23 @@ pub fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")] - fn _vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; +pub fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vhsub_u32(a, b) } } -#[doc = "Signed halving subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uhsub) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14302,66 +15366,46 @@ pub fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uhsub.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i32")] - fn _vhsubq_u32(a: 
uint32x4_t, b: uint32x4_t) -> uint32x4_t; - } - unsafe { _vhsubq_u32(a, b) } +pub fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } } -#[doc = "Load one single-element structure and replicate to all lanes of one register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld1_dup_f16(ptr: *const f16) -> float16x4_t { - let x: float16x4_t = vld1_lane_f16::<0>(ptr, transmute(f16x4::splat(0.0))); - simd_shuffle!(x, x, [0, 0, 0, 0]) -} -#[doc = "Load one single-element structure and replicate to all lanes of one register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld1q_dup_f16(ptr: *const f16) -> float16x8_t { - let x: float16x8_t = vld1q_lane_f16::<0>(ptr, transmute(f16x8::splat(0.0))); - simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", 
target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14371,20 +15415,19 @@ pub unsafe fn vld1q_dup_f16(ptr: *const f16) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { - transmute(f32x2::splat(*ptr)) +pub fn vget_high_s16(a: int16x8_t) -> int16x4_t { + unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14394,20 +15437,23 @@ pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { - transmute(u16x4::splat(*ptr)) +pub fn vget_high_s16(a: int16x8_t) -> int16x4_t { + unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14417,20 +15463,19 @@ pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { - transmute(u8x8::splat(*ptr)) +pub fn vget_high_s32(a: int32x4_t) -> int32x2_t { + unsafe { simd_shuffle!(a, a, [2, 3]) } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14440,20 +15485,23 @@ pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { - transmute(i16x4::splat(*ptr)) +pub fn vget_high_s32(a: int32x4_t) -> int32x2_t { + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x2_t = simd_shuffle!(a, a, [2, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14463,20 +15511,19 @@ pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { - transmute(i32x2::splat(*ptr)) +pub fn vget_high_s8(a: int8x16_t) -> int8x8_t { + unsafe { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14486,20 +15533,24 @@ pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { - transmute(i8x8::splat(*ptr)) +pub fn vget_high_s8(a: int8x16_t) -> int8x8_t { + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14509,20 +15560,19 @@ pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { - transmute(u16x4::splat(*ptr)) +pub fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { + unsafe { simd_shuffle!(a, a, [4, 5, 6, 7]) } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14532,20 +15582,23 @@ pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { - transmute(u32x2::splat(*ptr)) +pub fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { + unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] 
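[Editor's note] The vget_high_*/vget_low_* accessors in this hunk are safe fns whose bodies are pure shuffles, so splitting a 128-bit vector needs no unsafe at the call site when NEON is statically enabled. A usage sketch, assuming an aarch64 target (where neon is on by default), the safe-fn signatures in this tree, and the companion vget_low_u16/vdupq_n_u16/vget_lane_u16 intrinsics from the same module:

    #[cfg(target_arch = "aarch64")]
    fn main() {
        use core::arch::aarch64::{vdupq_n_u16, vget_high_u16, vget_lane_u16, vget_low_u16};
        let v = vdupq_n_u16(7); // all eight lanes = 7
        let (lo, hi) = (vget_low_u16(v), vget_high_u16(v)); // safe: just shuffles
        assert_eq!(vget_lane_u16::<0>(lo), vget_lane_u16::<3>(hi));
    }
    #[cfg(not(target_arch = "aarch64"))]
    fn main() {}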
#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14555,20 +15608,19 @@ pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { - transmute(u8x8::splat(*ptr)) +pub fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { + unsafe { simd_shuffle!(a, a, [2, 3]) } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14578,20 +15630,23 @@ pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { - transmute(f32x4::splat(*ptr)) +pub fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x2_t = simd_shuffle!(a, a, [2, 3]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14601,20 +15656,19 @@ pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { - transmute(u16x8::splat(*ptr)) +pub fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14624,20 +15678,24 @@ pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { - transmute(u8x16::splat(*ptr)) +pub fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14647,20 +15705,19 @@ pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { - transmute(i16x8::splat(*ptr)) +pub fn vget_high_p64(a: poly64x2_t) -> poly64x1_t { + unsafe { transmute(u64x1::new(simd_extract!(a, 1))) } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14670,20 +15727,22 @@ pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { - transmute(i32x4::splat(*ptr)) +pub fn vget_high_p64(a: 
poly64x2_t) -> poly64x1_t { + unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(u64x1::new(simd_extract!(a, 1))) + } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14693,20 +15752,19 @@ pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { - transmute(i64x2::splat(*ptr)) +pub fn vget_high_s64(a: int64x2_t) -> int64x1_t { + unsafe { int64x1_t([simd_extract!(a, 1)]) } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14716,20 +15774,22 @@ pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { - transmute(i8x16::splat(*ptr)) +pub fn vget_high_s64(a: int64x2_t) -> int64x1_t { + unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + int64x1_t([simd_extract!(a, 1)]) + } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( 
not(target_arch = "arm"), @@ -14739,20 +15799,19 @@ pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { - transmute(u16x8::splat(*ptr)) +pub fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { + unsafe { uint64x1_t([simd_extract!(a, 1)]) } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(ext) )] #[cfg_attr( not(target_arch = "arm"), @@ -14762,44 +15821,102 @@ pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { - transmute(u32x4::splat(*ptr)) +pub fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { + unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + uint64x1_t([simd_extract!(a, 1)]) + } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_f16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vget_lane_f16(a: float16x4_t) -> f16 { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_extract!(a, LANE as u32) } +} +#[doc = "Duplicate vector element to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "arm", target_feature(enable 
= "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vget_lane_f16(a: float16x4_t) -> f16 { + static_assert_uimm_bits!(LANE, 2); + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + simd_extract!(a, LANE as u32) + } +} +#[doc = "Duplicate vector element to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f16)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) )] -pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { - transmute(u64x2::splat(*ptr)) +#[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vgetq_lane_f16(a: float16x8_t) -> f16 { + static_assert_uimm_bits!(LANE, 3); + unsafe { simd_extract!(a, LANE as u32) } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1r) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vgetq_lane_f16(a: float16x8_t) -> f16 { + static_assert_uimm_bits!(LANE, 3); + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + simd_extract!(a, LANE as u32) + } +} +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_f32)"] +#[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14808,21 +15925,18 @@ pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { - transmute(u8x16::splat(*ptr)) +pub fn vget_lane_f32(v: float32x2_t) -> f32 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one 
register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_f32)"] #[inline] -#[target_feature(enable = "neon,aes")] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ldr) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14831,30 +15945,21 @@ pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t { - let x: poly64x1_t; - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = crate::core_arch::aarch64::vld1_p64(ptr); +pub fn vget_lane_f32(v: float32x2_t) -> f32 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { + let v: float32x2_t = simd_shuffle!(v, v, [1, 0]); + simd_extract!(v, IMM5 as u32) } - #[cfg(target_arch = "arm")] - { - x = crate::core_arch::arm::vld1_p64(ptr); - }; - x } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ldr) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14863,30 +15968,18 @@ pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t { - let x: int64x1_t; - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = crate::core_arch::aarch64::vld1_s64(ptr); - } - #[cfg(target_arch = "arm")] - { - x = crate::core_arch::arm::vld1_s64(ptr); - }; - x +pub fn vget_lane_p16(v: poly16x4_t) -> p16 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p16)"] #[inline] +#[cfg(target_endian = "big")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ldr) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -14895,367 +15988,84 @@ pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { - let x: uint64x1_t; - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - { - x = crate::core_arch::aarch64::vld1_u64(ptr); +pub fn vget_lane_p16(v: poly16x4_t) -> p16 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { + let v: poly16x4_t = simd_shuffle!(v, v, [3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) } - #[cfg(target_arch = "arm")] - { - x = crate::core_arch::arm::vld1_u64(ptr); - }; - x -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { - transmute(vld1_v4f16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t { - transmute(vld1q_v8f16( - ptr as *const i8, - crate::mem::align_of::() as i32, - )) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] 
-#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld1_f16_x2(a: *const f16) -> float16x4x2_t { - crate::ptr::read_unaligned(a.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld1_f16_x3(a: *const f16) -> float16x4x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_lane_p8(v: poly8x8_t) -> p8 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld1_f16_x4(a: *const f16) -> float16x4x4_t { - crate::ptr::read_unaligned(a.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld1q_f16_x2(a: *const f16) -> float16x8x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_lane_p8(v: poly8x8_t) -> p8 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { + let v: poly8x8_t = simd_shuffle!(v, v, [7, 6, 5, 4, 3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld1q_f16_x3(a: *const f16) -> float16x8x3_t { - crate::ptr::read_unaligned(a.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld1q_f16_x4(a: *const f16) -> float16x8x4_t { - crate::ptr::read_unaligned(a.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - transmute(vld1_v2f32::(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - transmute(vld1q_v4f32::(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] 
-#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - transmute(vld1_v8i8::(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - transmute(vld1q_v16i8::(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - transmute(vld1_v4i16::(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - transmute(vld1q_v8i16::(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - transmute(vld1_v2i32::(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] -pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - transmute(vld1q_v4i32::(ptr as 
*const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { - const ALIGN: i32 = crate::mem::align_of::<u64>() as i32; - transmute(vld1_v1i64::<ALIGN>(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] -pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { - const ALIGN: i32 = crate::mem::align_of::<u64>() as i32; - transmute(vld1q_v2i64::<ALIGN>(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { - const ALIGN: i32 = crate::mem::align_of::<p8>() as i32; - transmute(vld1_v8i8::<ALIGN>(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { - const ALIGN: i32 = crate::mem::align_of::<p8>() as i32; - transmute(vld1q_v16i8::<ALIGN>(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { - const ALIGN: i32 = crate::mem::align_of::<p16>() as i32; - transmute(vld1_v4i16::<ALIGN>(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - transmute(vld1q_v8i16::(ptr as *const i8)) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,aes")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] -pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - transmute(vld1q_v2i64::(ptr as *const i8)) +pub fn vget_lane_s16(v: int16x4_t) -> i16 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15264,21 +16074,21 @@ pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_lane_s16(v: int16x4_t) -> i16 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { + let v: int16x4_t = simd_shuffle!(v, v, [3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15287,21 +16097,18 @@ pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_lane_s32(v: int32x2_t) -> i32 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15310,21 +16117,21 @@ pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_lane_s32(v: int32x2_t) -> i32 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { + let v: int32x2_t = simd_shuffle!(v, v, [1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15333,21 +16140,18 @@ pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_lane_s8(v: int8x8_t) -> i8 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15356,21 +16160,21 @@ pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_lane_s8(v: int8x8_t) -> i8 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { + let v: int8x8_t = simd_shuffle!(v, v, [7, 6, 5, 4, 3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15379,62 +16183,41 @@ pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_lane_u16(v: uint16x4_t) -> u16 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1, LANE = 0))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 0) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld1_lane_f16(ptr: *const f16, src: float16x4_t) -> float16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) -} -#[doc = "Load one single-element structure to one lane of one register"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1, LANE = 0))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 0) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld1q_lane_f16<const LANE: i32>(ptr: *const f16, src: float16x8_t) -> float16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) +pub fn vget_lane_u16<const IMM5: i32>(v: uint16x4_t) -> u16 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { + let v: uint16x4_t = simd_shuffle!(v, v, [3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 1) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15443,23 +16226,18 @@ pub unsafe fn vld1q_lane_f16(ptr: *const f16, src: float16x8_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(src, LANE as u32, *ptr) +pub fn vget_lane_u32<const IMM5: i32>(v: uint32x2_t) -> u32 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 3) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15468,23 +16246,21 @@ pub unsafe
fn vld1_lane_f32(ptr: *const f32, src: float32x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) +pub fn vget_lane_u32<const IMM5: i32>(v: uint32x2_t) -> u32 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { + let v: uint32x2_t = simd_shuffle!(v, v, [1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 7) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15493,23 +16269,18 @@ pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t) - target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) +pub fn vget_lane_u8<const IMM5: i32>(v: uint8x8_t) -> u8 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 3) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15518,23 +16289,21 @@ pub unsafe fn vld1_lane_p8(ptr: *const p8, src: poly8x8_t) -> p target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) +pub fn vget_lane_u8<const IMM5: i32>(v: uint8x8_t) -> u8 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { + let v: uint8x8_t = simd_shuffle!(v, v, [7, 6, 5, 4, 3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 1) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15543,23 +16312,18 @@ pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_f32<const IMM5: i32>(v: float32x4_t) -> f32 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ldr, LANE = 0) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15568,23 +16332,21 @@ pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t { - static_assert!(LANE == 0); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_f32<const IMM5: i32>(v: float32x4_t) -> f32 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { + let v: float32x4_t = simd_shuffle!(v, v, [3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] -#[cfg_attr( - all(test,
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 7) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15593,23 +16355,18 @@ pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_p16(v: poly16x8_t) -> p16 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 3) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15618,23 +16375,21 @@ pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> in target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_p16(v: poly16x8_t) -> p16 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { + let v: poly16x8_t = simd_shuffle!(v, v, [7, 6, 5, 4, 3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 1) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15643,23 +16398,18 @@ pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) - target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(src, LANE as u32, *ptr) 
+pub fn vgetq_lane_p64<const IMM5: i32>(v: poly64x2_t) -> p64 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ldr, LANE = 0) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15668,23 +16418,21 @@ pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) - target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t { - static_assert!(LANE == 0); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_p64<const IMM5: i32>(v: poly64x2_t) -> p64 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { + let v: poly64x2_t = simd_shuffle!(v, v, [1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 7) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15693,23 +16441,18 @@ pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) - target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_p8<const IMM5: i32>(v: poly8x16_t) -> p8 { + static_assert_uimm_bits!(IMM5, 4); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 3) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15718,23 +16461,22 @@ pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t) -> u target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_p8(v: poly8x16_t) -> p8 { + static_assert_uimm_bits!(IMM5, 4); + unsafe { + let v: poly8x16_t = + simd_shuffle!(v, v, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 7) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15743,23 +16485,18 @@ pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_s16(v: int16x8_t) -> i16 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 15) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15768,23 +16505,21 @@ pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) 
target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(LANE, 4); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_s16(v: int16x8_t) -> i16 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { + let v: int16x8_t = simd_shuffle!(v, v, [7, 6, 5, 4, 3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 7) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15793,23 +16528,18 @@ pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_s32(v: int32x4_t) -> i32 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 3) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15818,23 +16548,21 @@ pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) - target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_s32(v: int32x4_t) -> i32 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { + let v: int32x4_t = simd_shuffle!(v, v, [3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 1) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15843,23 +16571,18 @@ pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) - target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_s64<const IMM5: i32>(v: int64x2_t) -> i64 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 15) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15868,23 +16591,21 @@ pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t) - target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(LANE, 4); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_s64<const IMM5: i32>(v: int64x2_t) -> i64 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { + let v: int64x2_t = simd_shuffle!(v, v, [1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] -#[cfg_attr( - all(test,
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 7) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15893,23 +16614,18 @@ pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_s8(v: int8x16_t) -> i8 { + static_assert_uimm_bits!(IMM5, 4); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 3) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15918,23 +16634,22 @@ pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_s8(v: int8x16_t) -> i8 { + static_assert_uimm_bits!(IMM5, 4); + unsafe { + let v: int8x16_t = + simd_shuffle!(v, v, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 1) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15943,23 +16658,18 @@ pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - 
simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_u16<const IMM5: i32>(v: uint16x8_t) -> u16 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 15) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15968,23 +16678,21 @@ pub unsafe fn vld1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(LANE, 4); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_u16<const IMM5: i32>(v: uint16x8_t) -> u16 { + static_assert_uimm_bits!(IMM5, 3); + unsafe { + let v: uint16x8_t = simd_shuffle!(v, v, [7, 6, 5, 4, 3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u32)"] #[inline] -#[target_feature(enable = "neon,aes")] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ldr, LANE = 0) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -15993,23 +16701,18 @@ pub unsafe fn vld1q_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x16_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -> poly64x1_t { - static_assert!(LANE == 0); - simd_insert!(src, LANE as u32, *ptr) +pub fn vgetq_lane_u32<const IMM5: i32>(v: uint32x4_t) -> u32 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load one single-element structure to one lane of one register."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u32)"] #[inline] -#[target_feature(enable = "neon,aes")] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld1, LANE = 1) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16018,40 +16721,21 @@ pub unsafe fn vld1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) - target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) -> poly64x2_t { - static_assert_uimm_bits!(LANE, 1); - simd_insert!(src, LANE as u32, *ptr) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,aes")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { - let a: *const i8 = ptr as *const i8; - let b: i32 = crate::mem::align_of::<p64>() as i32; - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] - fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; +pub fn vgetq_lane_u32<const IMM5: i32>(v: uint32x4_t) -> u32 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { + let v: uint32x4_t = simd_shuffle!(v, v, [3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } - transmute(_vld1_v1i64(a, b)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u64)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16060,21 +16744,18 @@ pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vgetq_lane_u64<const IMM5: i32>(v: uint64x2_t) -> u64 { + static_assert_uimm_bits!(IMM5, 1); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u64)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 1))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16083,21 +16764,21 @@ pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vgetq_lane_u64(v: uint64x2_t) -> u64 { + static_assert_uimm_bits!(IMM5, 2); + unsafe { + let v: uint64x2_t = simd_shuffle!(v, v, [1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u8)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16106,21 +16787,18 @@ pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vgetq_lane_u8(v: uint8x16_t) -> u8 { + static_assert_uimm_bits!(IMM5, 4); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u8)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(ld) -)] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 2))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16129,21 +16807,21 @@ pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vgetq_lane_u8<const IMM5: i32>(v: uint8x16_t) -> u8 { + static_assert_uimm_bits!(IMM5, 4); + unsafe { + let v: uint8x16_t = + simd_shuffle!(v, v, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + simd_extract!(v, IMM5 as u32) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p64)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16152,21 +16830,17 @@ pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_lane_p64<const IMM5: i32>(v: poly64x1_t) -> p64 { + static_assert!(IMM5 == 0); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s64)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16175,125 +16849,17 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { - crate::ptr::read_unaligned(a.cast()) -} -#[doc = "Load multiple
single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { - const ALIGN: i32 = crate::mem::align_of::<i8>() as i32; - vld1_v8i8::<ALIGN>(ptr as *const i8) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] -pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { - const ALIGN: i32 = crate::mem::align_of::<i8>() as i32; - vld1q_v16i8::<ALIGN>(ptr as *const i8) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { - const ALIGN: i32 = crate::mem::align_of::<i16>() as i32; - vld1_v4i16::<ALIGN>(ptr as *const i8) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] -pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { - const ALIGN: i32 = crate::mem::align_of::<i16>() as i32; - vld1q_v8i16::<ALIGN>(ptr as *const i8) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { - const ALIGN: i32 = crate::mem::align_of::<i32>() as i32; - vld1_v2i32::<ALIGN>(ptr as *const i8) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test,
target_arch = "arm"), assert_instr("vld1.32"))] -pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vld1q_v4i32::(ptr as *const i8) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] -pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vld1_v1i64::(ptr as *const i8) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] -pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vld1q_v2i64::(ptr as *const i8) +pub fn vget_lane_s64(v: int64x1_t) -> i64 { + static_assert!(IMM5 == 0); + unsafe { simd_extract!(v, IMM5 as u32) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Move vector element to general-purpose register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(nop, IMM5 = 0))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16302,21 +16868,17 @@ pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_lane_u64(v: uint64x1_t) -> u64 { + static_assert!(IMM5 == 0); + unsafe { simd_extract!(v, 0) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16325,21 +16887,16 @@ pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_f32(a: float32x4_t) -> float32x2_t { + unsafe { simd_shuffle!(a, a, [0, 1]) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16348,21 +16905,20 @@ pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_f32(a: float32x4_t) -> float32x2_t { + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x2_t = simd_shuffle!(a, a, [0, 1]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16371,21 +16927,16 @@ pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16394,21 +16945,20 @@ pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16417,21 +16967,16 @@ pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16440,21 +16985,21 @@ pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { + unsafe { + let a: poly8x16_t = + 
simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16463,21 +17008,16 @@ pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_s16(a: int16x8_t) -> int16x4_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16486,21 +17026,20 @@ pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_s16(a: int16x8_t) -> int16x4_t { + unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16509,21 +17048,16 @@ pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_s32(a: int32x4_t) -> int32x2_t { + unsafe { simd_shuffle!(a, a, [0, 1]) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16532,21 +17066,20 @@ pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_s32(a: int32x4_t) -> int32x2_t { + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x2_t = simd_shuffle!(a, a, [0, 1]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16555,21 +17088,16 @@ pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_s8(a: int8x16_t) -> int8x8_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16578,21 +17106,21 @@ pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_s8(a: int8x16_t) -> int8x8_t { + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16601,21 +17129,16 @@ pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16624,21 +17147,20 @@ pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_u16(a: uint16x8_t) -> 
uint16x4_t { + unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16647,21 +17169,16 @@ pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { + unsafe { simd_shuffle!(a, a, [0, 1]) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16670,21 +17187,20 @@ pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x2_t = simd_shuffle!(a, a, [0, 1]); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16693,21 +17209,16 @@ pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16716,21 +17227,21 @@ pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16739,21 +17250,16 @@ pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_p64(a: poly64x2_t) -> poly64x1_t { + unsafe { transmute(u64x1::new(simd_extract!(a, 0))) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate 
vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16762,21 +17268,19 @@ pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_p64(a: poly64x2_t) -> poly64x1_t { + unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(u64x1::new(simd_extract!(a, 0))) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16785,21 +17289,16 @@ pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_s64(a: int64x2_t) -> int64x1_t { + unsafe { int64x1_t([simd_extract!(a, 0)]) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16808,21 +17307,19 @@ pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_s64(a: int64x2_t) -> int64x1_t { + unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + 
int64x1_t([simd_extract!(a, 0)]) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16831,21 +17328,16 @@ pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { + unsafe { uint64x1_t([simd_extract!(a, 0)]) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) -)] +#[cfg_attr(test, assert_instr(nop))] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -16854,20 +17346,21 @@ pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { + unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + uint64x1_t([simd_extract!(a, 0)]) + } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(shadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -16877,20 +17370,26 @@ pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") )] -pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i8")] + fn _vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + unsafe { _vhadd_s8(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(shadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -16900,20 +17399,26 @@ pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v16i8")] + fn _vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + unsafe { _vhaddq_s8(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(shadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -16923,20 +17428,26 @@ pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i16")] + fn _vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + unsafe { _vhadd_s16(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(shadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -16946,20 +17457,26 @@ pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i16")] + fn _vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + unsafe { _vhaddq_s16(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(shadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -16969,20 +17486,26 @@ pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v2i32")] + fn _vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { _vhadd_s32(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(ld) + assert_instr(shadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -16992,20 +17515,26 @@ pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i32")] + fn _vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + unsafe { _vhaddq_s32(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(uhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -17015,20 +17544,26 @@ pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")] + fn _vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; + } + unsafe { _vhadd_u8(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(uhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -17038,20 +17573,26 @@ pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vhaddu.v16i8")] + fn _vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; + } + unsafe { _vhaddq_u8(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(uhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -17061,20 +17602,26 @@ pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i16")] + fn _vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + } + unsafe { _vhadd_u16(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(uhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -17084,20 +17631,26 @@ pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")] + fn _vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + } + unsafe { _vhaddq_u16(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(uhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -17107,20 +17660,26 @@ pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")] + fn _vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; + } + unsafe { _vhadd_u32(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(uhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -17130,20 +17689,26 @@ pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { - crate::ptr::read_unaligned(a.cast()) -} -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +pub fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")] + fn _vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + } + unsafe { _vhaddq_u32(a, b) } +} +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -17153,20 +17718,26 @@ pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { - 
crate::ptr::read_unaligned(a.cast()) +pub fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i16")] + fn _vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + unsafe { _vhsub_s16(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -17176,20 +17747,26 @@ pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i16")] + fn _vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + unsafe { _vhsubq_s16(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -17199,20 +17776,26 @@ pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v2i32")] + fn _vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { _vhsub_s32(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -17222,20 +17805,26 @@ pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i32")] + fn _vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + unsafe { _vhsubq_s32(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -17245,20 +17834,26 @@ pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i8")] + fn _vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + unsafe { _vhsub_s8(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] #[cfg_attr( all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(shsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -17268,20 +17863,26 @@ pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.shsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v16i8")] + fn _vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + } + unsafe { _vhsubq_s8(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -17291,20 +17892,26 @@ pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")] + fn _vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; + } + unsafe { _vhsub_u8(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -17314,20 +17921,26 @@ pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = 
"llvm.aarch64.neon.uhsub.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")] + fn _vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; + } + unsafe { _vhsubq_u8(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(uhsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -17337,20 +17950,153 @@ pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { - crate::ptr::read_unaligned(a.cast()) +pub fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")] + fn _vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + } + unsafe { _vhsub_u16(a, b) } } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)"] +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")] + fn _vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; + } + unsafe { _vhsubq_u16(a, b) } +} +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")] + fn _vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; + } + unsafe { _vhsub_u32(a, b) } +} +#[doc = "Signed halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uhsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uhsub.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i32")] + fn _vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; + } + unsafe { _vhsubq_u32(a, b) } +} +#[doc = "Load one single-element structure and replicate to all lanes of one register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) +)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld1_dup_f16(ptr: *const f16) -> float16x4_t { + let x: float16x4_t = vld1_lane_f16::<0>(ptr, transmute(f16x4::splat(0.0))); + simd_shuffle!(x, x, [0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and replicate to all lanes of one register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld1q_dup_f16(ptr: *const f16) -> float16x8_t { + let x: float16x8_t = vld1q_lane_f16::<0>(ptr, transmute(f16x8::splat(0.0))); + simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17360,20 +18106,20 @@ pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { + transmute(f32x2::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)"] +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17383,20 +18129,20 @@ pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { + transmute(u16x4::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)"] +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17406,20 +18152,20 @@ pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { + transmute(u8x8::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)"] +#[doc = 
"Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17429,20 +18175,20 @@ pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { + transmute(i16x4::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)"] +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17452,20 +18198,20 @@ pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { + transmute(i32x2::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)"] +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17475,20 +18221,20 @@ pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { + transmute(i8x8::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, 
three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)"] +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17498,20 +18244,20 @@ pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { + transmute(u16x4::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)"] +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17521,20 +18267,20 @@ pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { + transmute(u32x2::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)"] +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17544,20 +18290,20 @@ pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { - 
crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { + transmute(u8x8::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)"] +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17567,20 +18313,20 @@ pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { + transmute(f32x4::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)"] +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17590,20 +18336,20 @@ pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { + transmute(u16x8::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)"] +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17613,20 +18359,20 @@ pub unsafe fn vld1_p16_x4(a: *const 
p16) -> poly16x4x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { + transmute(u8x16::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)"] +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17636,20 +18382,20 @@ pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { + transmute(i16x8::splat(*ptr)) } -#[doc = "Load multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)"] +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld) + assert_instr(ld1r) )] #[cfg_attr( not(target_arch = "arm"), @@ -17659,568 +18405,591 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { - crate::ptr::read_unaligned(a.cast()) +pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { + transmute(i32x4::splat(*ptr)) } +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[rustc_legacy_const_generics(1)] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v1i64(a: *const i8) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] - fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; - } - 
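
The vld1_dup/vld1q_dup rewrites in this hunk drop the per-target LLVM load builtins in favour of a plain dereference plus lane broadcast (`splat`), which the backend can still fold into a single LD1R/VLD1. A portable sketch of the operation's shape, with a hypothetical helper name (not the intrinsic itself):

// What vld1_dup_u16 computes: read one element through the pointer and
// repeat it in every lane. Caller upholds the same safety contract as
// the intrinsic: `p` must be valid for a u16 read.
unsafe fn dup4_u16(p: *const u16) -> [u16; 4] {
    [*p; 4]
}

fn main() {
    let v = 7u16;
    assert_eq!(unsafe { dup4_u16(&v) }, [7, 7, 7, 7]);
}
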
_vld1_v1i64(a, ALIGN) -} -#[inline] -#[rustc_legacy_const_generics(1)] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v2f32(a: *const i8) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2f32")] - fn _vld1_v2f32(a: *const i8, b: i32) -> float32x2_t; - } - _vld1_v2f32(a, ALIGN) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { + transmute(i64x2::splat(*ptr)) } +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[rustc_legacy_const_generics(1)] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v2i32(a: *const i8) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i32")] - fn _vld1_v2i32(a: *const i8, b: i32) -> int32x2_t; - } - _vld1_v2i32(a, ALIGN) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { + transmute(i8x16::splat(*ptr)) } +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[rustc_legacy_const_generics(1)] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v4i16(a: *const i8) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i16")] - fn _vld1_v4i16(a: *const i8, b: i32) -> int16x4_t; - } - _vld1_v4i16(a, ALIGN) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + 
stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { + transmute(u16x8::splat(*ptr)) } +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[rustc_legacy_const_generics(1)] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1_v8i8(a: *const i8) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i8")] - fn _vld1_v8i8(a: *const i8, b: i32) -> int8x8_t; - } - _vld1_v8i8(a, ALIGN) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { + transmute(u32x4::splat(*ptr)) } +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[rustc_legacy_const_generics(1)] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v16i8(a: *const i8) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v16i8")] - fn _vld1q_v16i8(a: *const i8, b: i32) -> int8x16_t; - } - _vld1q_v16i8(a, ALIGN) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { + transmute(u64x2::splat(*ptr)) } +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[rustc_legacy_const_generics(1)] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v2i64(a: *const i8) -> int64x2_t { - 
unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i64")] - fn _vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t; - } - _vld1q_v2i64(a, ALIGN) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { + transmute(u8x16::splat(*ptr)) } +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[rustc_legacy_const_generics(1)] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v4f32(a: *const i8) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f32")] - fn _vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t; +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ldr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t { + let x: poly64x1_t; + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = crate::core_arch::aarch64::vld1_p64(ptr); } - _vld1q_v4f32(a, ALIGN) + #[cfg(target_arch = "arm")] + { + x = crate::core_arch::arm::vld1_p64(ptr); + }; + x } +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[rustc_legacy_const_generics(1)] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v4i32(a: *const i8) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i32")] - fn _vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t; +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ldr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe 
fn vld1_dup_s64(ptr: *const i64) -> int64x1_t { + let x: int64x1_t; + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = crate::core_arch::aarch64::vld1_s64(ptr); } - _vld1q_v4i32(a, ALIGN) + #[cfg(target_arch = "arm")] + { + x = crate::core_arch::arm::vld1_s64(ptr); + }; + x } +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[rustc_legacy_const_generics(1)] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -unsafe fn vld1q_v8i16(a: *const i8) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i16")] - fn _vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t; +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ldr) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { + let x: uint64x1_t; + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + { + x = crate::core_arch::aarch64::vld1_u64(ptr); } - _vld1q_v8i16(a, ALIGN) + #[cfg(target_arch = "arm")] + { + x = crate::core_arch::arm::vld1_u64(ptr); + }; + x } +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] -unsafe fn vld1_v4f16(a: *const i8, b: i32) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f16")] - fn _vld1_v4f16(a: *const i8, b: i32) -> float16x4_t; - } - _vld1_v4f16(a, b) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { + transmute(vld1_v4f16( + ptr as *const i8, + crate::mem::align_of::<f16>() as i32, + )) } +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] -unsafe fn vld1q_v8f16(a: 
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[target_feature(enable = "neon,fp16")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-unsafe fn vld1_v4f16(a: *const i8, b: i32) -> float16x4_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f16")]
-        fn _vld1_v4f16(a: *const i8, b: i32) -> float16x4_t;
-    }
-    _vld1_v4f16(a, b)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
+pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
+    transmute(vld1_v4f16(
+        ptr as *const i8,
+        crate::mem::align_of::<f16>() as i32,
+    ))
 }
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[target_feature(enable = "neon,fp16")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-unsafe fn vld1q_v8f16(a: *const i8, b: i32) -> float16x8_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8f16")]
-        fn _vld1q_v8f16(a: *const i8, b: i32) -> float16x8_t;
-    }
-    _vld1q_v8f16(a, b)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
+pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
+    transmute(vld1q_v8f16(
+        ptr as *const i8,
+        crate::mem::align_of::<f16>() as i32,
+    ))
 }
-#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p64)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x2)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,aes")]
+#[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld1r)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    assert_instr(ld)
 )]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vld1_f16_x2(a: *const f16) -> float16x4x2_t {
+    crate::ptr::read_unaligned(a.cast())
+}
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld)
 )]
-pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t {
-    let x = vld1q_lane_p64::<0>(ptr, transmute(u64x2::splat(0)));
-    simd_shuffle!(x, x, [0, 0])
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vld1_f16_x3(a: *const f16) -> float16x4x3_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x4)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg(target_arch = "arm")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld)
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld2_dup_f16(a: *const f16) -> float16x4x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f16.p0")]
-        fn _vld2_dup_f16(ptr: *const f16, size: i32) -> float16x4x2_t;
-    }
-    _vld2_dup_f16(a as _, 2)
+pub unsafe fn vld1_f16_x4(a: *const f16) -> float16x4x4_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x2)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg(target_arch = "arm")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld)
+)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld2q_dup_f16(a: *const f16) -> float16x8x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8f16.p0")]
-        fn _vld2q_dup_f16(ptr: *const f16, size: i32) -> float16x8x2_t;
-    }
-    _vld2q_dup_f16(a as _, 2)
+pub unsafe fn vld1q_f16_x2(a: *const f16) -> float16x8x2_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x3)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld2r)
+    assert_instr(ld)
 )]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld2_dup_f16(a: *const f16) -> float16x4x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2r.v4f16.p0"
-        )]
-        fn _vld2_dup_f16(ptr: *const f16) -> float16x4x2_t;
-    }
-    _vld2_dup_f16(a as _)
+pub unsafe fn vld1q_f16_x3(a: *const f16) -> float16x8x3_t {
+    crate::ptr::read_unaligned(a.cast())
 }
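// --- Illustrative sketch (not from the patch) ---
// The vld1*_x2/_x3/_x4 bodies above are now one unaligned read of the whole
// multi-vector value. `PairF32x2` is a hypothetical stand-in for tuple types
// such as float16x4x2_t or float32x2x2_t.
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(C)]
struct PairF32x2([f32; 2], [f32; 2]);

unsafe fn load_x2(a: *const f32) -> PairF32x2 {
    // One read covers both registers' worth of data; no alignment is assumed.
    core::ptr::read_unaligned(a.cast())
}

fn main() {
    let data = [1.0f32, 2.0, 3.0, 4.0];
    let loaded = unsafe { load_x2(data.as_ptr()) };
    assert_eq!(loaded, PairF32x2([1.0, 2.0], [3.0, 4.0]));
}
// --- end sketch ---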
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x4)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld2r)
+    assert_instr(ld)
 )]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld2q_dup_f16(a: *const f16) -> float16x8x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2r.v8f16.p0"
-        )]
-        fn _vld2q_dup_f16(ptr: *const f16) -> float16x8x2_t;
-    }
-    _vld2q_dup_f16(a as _)
+pub unsafe fn vld1q_f16_x4(a: *const f16) -> float16x8x4_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0")]
-        fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t;
-    }
-    _vld2_dup_f32(a as *const i8, 4)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))]
+pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
+    const ALIGN: i32 = crate::mem::align_of::<f32>() as i32;
+    transmute(vld1_v2f32::<ALIGN>(ptr as *const i8))
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0")]
-        fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t;
-    }
-    _vld2q_dup_f32(a as *const i8, 4)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
+pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
+    const ALIGN: i32 = crate::mem::align_of::<f32>() as i32;
+    transmute(vld1q_v4f32::<ALIGN>(ptr as *const i8))
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0")]
-        fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t;
-    }
-    _vld2_dup_s8(a as *const i8, 1)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
+pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
+    const ALIGN: i32 = crate::mem::align_of::<u8>() as i32;
+    transmute(vld1_v8i8::<ALIGN>(ptr as *const i8))
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0")]
-        fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t;
-    }
-    _vld2q_dup_s8(a as *const i8, 1)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
+pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
+    const ALIGN: i32 = crate::mem::align_of::<u8>() as i32;
+    transmute(vld1q_v16i8::<ALIGN>(ptr as *const i8))
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0")]
-        fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t;
-    }
-    _vld2_dup_s16(a as *const i8, 2)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
+pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
+    const ALIGN: i32 = crate::mem::align_of::<u16>() as i32;
+    transmute(vld1_v4i16::<ALIGN>(ptr as *const i8))
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0")]
-        fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t;
-    }
-    _vld2q_dup_s16(a as *const i8, 2)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
+pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
+    const ALIGN: i32 = crate::mem::align_of::<u16>() as i32;
+    transmute(vld1q_v8i16::<ALIGN>(ptr as *const i8))
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0")]
-        fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t;
-    }
-    _vld2_dup_s32(a as *const i8, 4)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))]
+pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
+    const ALIGN: i32 = crate::mem::align_of::<u32>() as i32;
+    transmute(vld1_v2i32::<ALIGN>(ptr as *const i8))
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0")]
-        fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t;
-    }
-    _vld2q_dup_s32(a as *const i8, 4)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
+pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
+    const ALIGN: i32 = crate::mem::align_of::<u32>() as i32;
+    transmute(vld1q_v4i32::<ALIGN>(ptr as *const i8))
 }
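// --- Illustrative sketch (not from the patch) ---
// The vld1_u*/vld1q_u* bodies above compute the element alignment at compile
// time and thread it through a const generic instead of a runtime i32
// argument. `load_v2i32` is a hypothetical stand-in for the private
// vld1_v2i32 helper.
unsafe fn load_v2i32<const ALIGN: i32>(p: *const i8) -> [i32; 2] {
    // A real helper would emit an alignment-annotated vector load; the
    // sketch only checks the hint before reading.
    debug_assert_eq!(p as usize % ALIGN as usize, 0);
    core::ptr::read_unaligned(p.cast())
}

fn main() {
    const ALIGN: i32 = core::mem::align_of::<u32>() as i32;
    let data = [7i32, 8];
    assert_eq!(unsafe { load_v2i32::<ALIGN>(data.as_ptr().cast()) }, [7, 8]);
}
// --- end sketch ---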
= "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v2f32.p0" - )] - fn _vld2_dup_f32(ptr: *const f32) -> float32x2x2_t; - } - _vld2_dup_f32(a as _) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { + const ALIGN: i32 = crate::mem::align_of::() as i32; + transmute(vld1_v1i64::(ptr as *const i8)) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4f32.p0" - )] - fn _vld2q_dup_f32(ptr: *const f32) -> float32x4x2_t; - } - _vld2q_dup_f32(a as _) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))] +pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { + const ALIGN: i32 = crate::mem::align_of::() as i32; + transmute(vld1q_v2i64::(ptr as *const i8)) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v8i8.p0" - )] - fn _vld2_dup_s8(ptr: *const i8) -> int8x8x2_t; - } - _vld2_dup_s8(a as _) +#[cfg(target_arch 
= "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { + const ALIGN: i32 = crate::mem::align_of::() as i32; + transmute(vld1_v8i8::(ptr as *const i8)) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v16i8.p0" - )] - fn _vld2q_dup_s8(ptr: *const i8) -> int8x16x2_t; - } - _vld2q_dup_s8(a as _) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] +pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { + const ALIGN: i32 = crate::mem::align_of::() as i32; + transmute(vld1q_v16i8::(ptr as *const i8)) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v4i16.p0" - )] - fn _vld2_dup_s16(ptr: *const i16) -> int16x4x2_t; - } - _vld2_dup_s16(a as _) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] +pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { + const ALIGN: i32 = crate::mem::align_of::() as i32; + transmute(vld1_v4i16::(ptr as *const i8)) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] 
-#[cfg_attr(test, assert_instr(ld2r))]
-pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2r.v8i16.p0"
-        )]
-        fn _vld2q_dup_s16(ptr: *const i16) -> int16x8x2_t;
-    }
-    _vld2q_dup_s16(a as _)
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
+pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
+    const ALIGN: i32 = crate::mem::align_of::<p16>() as i32;
+    transmute(vld1q_v8i16::<ALIGN>(ptr as *const i8))
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(ld2r))]
-pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2r.v2i32.p0"
-        )]
-        fn _vld2_dup_s32(ptr: *const i32) -> int32x2x2_t;
-    }
-    _vld2_dup_s32(a as _)
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,aes")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))]
+pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
+    const ALIGN: i32 = crate::mem::align_of::<p64>() as i32;
+    transmute(vld1q_v2i64::<ALIGN>(ptr as *const i8))
 }
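// --- Usage sketch (not from the patch; aarch64 shown, where NEON is on by
// default) --- the public signatures of the plain vld1 loads above are
// unchanged, e.g. a 16-byte vector load:
#[cfg(target_arch = "aarch64")]
fn demo() {
    use core::arch::aarch64::{vgetq_lane_u8, vld1q_u8};
    let bytes = *b"0123456789abcdef";
    // SAFETY: `bytes` supplies 16 readable bytes.
    let v = unsafe { vld1q_u8(bytes.as_ptr()) };
    assert_eq!(unsafe { vgetq_lane_u8::<15>(v) }, b'f');
}
// --- end sketch ---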
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -18230,55 +18999,20 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { - transmute(vld2_dup_s64(transmute(a))) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v1i64.p0")] - fn _vld2_dup_s64(ptr: *const i8, size: i32) -> int64x1x2_t; - } - _vld2_dup_s64(a as *const i8, 8) -} -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2r))] -pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2r.v1i64.p0" - )] - fn _vld2_dup_s64(ptr: *const i64) -> int64x1x2_t; - } - _vld2_dup_s64(a as _) +pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u64)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -18288,20 +19022,20 @@ pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { - transmute(vld2_dup_s64(transmute(a))) +pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's 
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld2r)
+    assert_instr(ld)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -18311,20 +19045,20 @@ pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t {
-    transmute(vld2_dup_s8(transmute(a)))
+pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld2r)
+    assert_instr(ld)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -18334,20 +19068,20 @@ pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t {
-    transmute(vld2q_dup_s8(transmute(a)))
+pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld2r)
+    assert_instr(ld)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -18357,20 +19091,20 @@ pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t {
-    transmute(vld2_dup_s16(transmute(a)))
+pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld2r)
+    assert_instr(ld)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -18380,20 +19114,61 @@ pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t {
-    transmute(vld2q_dup_s16(transmute(a)))
+pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"]
+#[doc = "Load one single-element structure to one lane of one register"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld2r)
+    assert_instr(ld1, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vld1_lane_f16<const LANE: i32>(ptr: *const f16, src: float16x4_t) -> float16x4_t {
+    static_assert_uimm_bits!(LANE, 2);
+    simd_insert!(src, LANE as u32, *ptr)
+}
+#[doc = "Load one single-element structure to one lane of one register"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld1, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vld1q_lane_f16<const LANE: i32>(ptr: *const f16, src: float16x8_t) -> float16x8_t {
+    static_assert_uimm_bits!(LANE, 3);
+    simd_insert!(src, LANE as u32, *ptr)
+}
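// --- Usage sketch (not from the patch; aarch64 shown) ---
// The simd_insert!-based lane loads above read one element from `ptr` into
// lane LANE of `src` and leave every other lane untouched:
#[cfg(target_arch = "aarch64")]
fn demo() {
    use core::arch::aarch64::{vdupq_n_u16, vgetq_lane_u16, vld1q_lane_u16};
    let x: u16 = 7;
    unsafe {
        let src = vdupq_n_u16(1);
        let v = vld1q_lane_u16::<3>(&x, src);
        assert_eq!(vgetq_lane_u16::<3>(v), 7); // replaced lane
        assert_eq!(vgetq_lane_u16::<0>(v), 1); // untouched lane
    }
}
// --- end sketch ---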
+#[doc = "Load one single-element structure to one lane of one register."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_f32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld1, LANE = 1)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -18403,20 +19178,22 @@ pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t {
-    transmute(vld2_dup_s32(transmute(a)))
+pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t) -> float32x2_t {
+    static_assert_uimm_bits!(LANE, 1);
+    simd_insert!(src, LANE as u32, *ptr)
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"]
+#[doc = "Load one single-element structure to one lane of one register."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld2r)
+    assert_instr(ld1, LANE = 3)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -18426,20 +19203,22 @@ pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t {
-    transmute(vld2q_dup_s32(transmute(a)))
+pub unsafe fn vld1_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x4_t) -> poly16x4_t {
+    static_assert_uimm_bits!(LANE, 2);
+    simd_insert!(src, LANE as u32, *ptr)
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"]
+#[doc = "Load one single-element structure to one lane of one register."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld2r)
+    assert_instr(ld1, LANE = 7)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -18449,20 +19228,22 @@ pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t {
-    transmute(vld2_dup_s8(transmute(a)))
+pub unsafe fn vld1_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x8_t) -> poly8x8_t {
+    static_assert_uimm_bits!(LANE, 3);
+    simd_insert!(src, LANE as u32, *ptr)
 }
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) + assert_instr(ld1, LANE = 3) )] #[cfg_attr( not(target_arch = "arm"), @@ -18472,20 +19253,22 @@ pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { - transmute(vld2q_dup_s8(transmute(a))) +pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) + assert_instr(ld1, LANE = 1) )] #[cfg_attr( not(target_arch = "arm"), @@ -18495,20 +19278,22 @@ pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { - transmute(vld2_dup_s16(transmute(a))) +pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2r) + assert_instr(ldr, LANE = 0) )] #[cfg_attr( not(target_arch = "arm"), @@ -18518,809 +19303,778 @@ pub unsafe fn 
@@ -18518,809 +19303,778 @@ pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t {
-    transmute(vld2q_dup_s16(transmute(a)))
+pub unsafe fn vld1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t {
+    static_assert!(LANE == 0);
+    simd_insert!(src, LANE as u32, *ptr)
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f16)"]
-#[doc = "## Safety"]
+#[doc = "Load one single-element structure to one lane of one register."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s8)"]
+#[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg(target_arch = "arm")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld2_f16(a: *const f16) -> float16x4x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f16.p0")]
-        fn _vld2_f16(ptr: *const f16, size: i32) -> float16x4x2_t;
-    }
-    _vld2_f16(a as _, 2)
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld1, LANE = 7)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x8_t) -> int8x8_t {
+    static_assert_uimm_bits!(LANE, 3);
+    simd_insert!(src, LANE as u32, *ptr)
 }
-#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f16)"]
+#[doc = "Load one single-element structure to one lane of one register."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg(target_arch = "arm")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld2q_f16(a: *const f16) -> float16x8x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8f16.p0")]
-        fn _vld2q_f16(ptr: *const f16, size: i32) -> float16x8x2_t;
-    }
-    _vld2q_f16(a as _, 2)
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 3))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld1, LANE = 3)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
"stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f16)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) + assert_instr(ld1, LANE = 1) )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld2_f16(a: *const f16) -> float16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4f16.p0" - )] - fn _vld2_f16(ptr: *const f16) -> float16x4x2_t; - } - _vld2_f16(a as _) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f16)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) + assert_instr(ldr, LANE = 0) )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld2q_f16(a: *const f16) -> float16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8f16.p0" - )] - fn _vld2q_f16(ptr: *const f16) -> float16x8x2_t; - } - _vld2q_f16(a as _) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) -> uint64x1_t { + static_assert!(LANE == 0); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load multiple 2-element 
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"]
+#[doc = "Load one single-element structure to one lane of one register."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32")]
-        fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t;
-    }
-    _vld2_f32(a as *const i8, 4)
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 7))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld1, LANE = 7)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x8_t) -> uint8x8_t {
+    static_assert_uimm_bits!(LANE, 3);
+    simd_insert!(src, LANE as u32, *ptr)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"]
+#[doc = "Load one single-element structure to one lane of one register."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32")]
-        fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t;
-    }
-    _vld2q_f32(a as *const i8, 4)
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld1, LANE = 3)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1q_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x4_t) -> float32x4_t {
+    static_assert_uimm_bits!(LANE, 2);
+    simd_insert!(src, LANE as u32, *ptr)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"]
+#[doc = "Load one single-element structure to one lane of one register."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8")]
-        fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t;
-    }
-    _vld2_s8(a as *const i8, 1)
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld1, LANE = 7)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1q_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x8_t) -> poly16x8_t {
+    static_assert_uimm_bits!(LANE, 3);
+    simd_insert!(src, LANE as u32, *ptr)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"]
+#[doc = "Load one single-element structure to one lane of one register."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8")]
-        fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t;
-    }
-    _vld2q_s8(a as *const i8, 1)
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))]
+#[cfg_attr(
    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld1, LANE = 15)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1q_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x16_t) -> poly8x16_t {
+    static_assert_uimm_bits!(LANE, 4);
+    simd_insert!(src, LANE as u32, *ptr)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"]
+#[doc = "Load one single-element structure to one lane of one register."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld2))]
-pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16")]
-        fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t;
-    }
*const i8, 2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 7) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16")] - fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; - } - _vld2q_s16(a as *const i8, 2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32")] - fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; - } - _vld2_s32(a as *const i8, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = 
"neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld2))] -pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32")] - fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; - } - _vld2q_s32(a as *const i8, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 15) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(LANE, 4); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v2f32.p0" - )] - fn _vld2_f32(ptr: *const float32x2_t) -> float32x2x2_t; - } - _vld2_f32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16", LANE = 7))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 7) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4f32.p0" - )] - fn _vld2q_f32(ptr: *const float32x4_t) -> float32x4x2_t; - } - _vld2q_f32(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32", LANE = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8i8.p0" - )] - fn _vld2_s8(ptr: *const int8x8_t) -> int8x8x2_t; - } - _vld2_s8(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = 
"neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v16i8.p0" - )] - fn _vld2q_s8(ptr: *const int8x16_t) -> int8x16x2_t; - } - _vld2q_s8(a as _) -} -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", LANE = 15))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1, LANE = 15) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(LANE, 4); + simd_insert!(src, LANE as u32, *ptr) +} +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v4i16.p0" - )] - fn _vld2_s16(ptr: *const int16x4_t) -> int16x4x2_t; - } - _vld2_s16(a as _) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ldr, LANE = 0) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_lane_p64(ptr: *const p64, src: poly64x1_t) -> poly64x1_t { + static_assert!(LANE == 0); + simd_insert!(src, LANE as u32, *ptr) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] +#[doc = "Load one single-element structure to one lane of one register."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld2))] -pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v8i16.p0" - )] - fn _vld2q_s16(ptr: *const int16x8_t) -> int16x8x2_t; - } - _vld2q_s16(a as _) +#[target_feature(enable = "neon,aes")] 
+#[doc = "Load one single-element structure to one lane of one register."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p64)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(ld2))]
-pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2.v4i16.p0"
-        )]
-        fn _vld2_s16(ptr: *const int16x4_t) -> int16x4x2_t;
-    }
-    _vld2_s16(a as _)
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ldr, LANE = 0)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -> poly64x1_t {
+    static_assert!(LANE == 0);
+    simd_insert!(src, LANE as u32, *ptr)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"]
+#[doc = "Load one single-element structure to one lane of one register."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p64)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(ld2))]
-pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2.v8i16.p0"
-        )]
-        fn _vld2q_s16(ptr: *const int16x8_t) -> int16x8x2_t;
-    }
-    _vld2q_s16(a as _)
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr, LANE = 1))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld1, LANE = 1)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) -> poly64x2_t {
+    static_assert_uimm_bits!(LANE, 1);
+    simd_insert!(src, LANE as u32, *ptr)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(ld2))]
-pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t {
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,aes")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))]
+pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
+    let a: *const i8 = ptr as *const i8;
+    let b: i32 = crate::mem::align_of::<p64>() as i32;
     unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2.v2i32.p0"
-        )]
-        fn _vld2_s32(ptr: *const int32x2_t) -> int32x2x2_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")]
+        fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t;
     }
-    _vld2_s32(a as _)
+    transmute(_vld1_v1i64(a, b))
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x2)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(ld2))]
-pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2.v4i32.p0"
-        )]
-        fn _vld2q_s32(ptr: *const int32x4_t) -> int32x4x2_t;
-    }
-    _vld2q_s32(a as _)
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x3)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld2_lane_f16<const LANE: i32>(a: *const f16, b: float16x4x2_t) -> float16x4x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f16.p0")]
-        fn _vld2_lane_f16(
-            ptr: *const f16,
-            a: float16x4_t,
-            b: float16x4_t,
-            n: i32,
-            size: i32,
-        ) -> float16x4x2_t;
-    }
-    _vld2_lane_f16(a as _, b.0, b.1, LANE, 2)
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x4)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld2q_lane_f16<const LANE: i32>(a: *const f16, b: float16x8x2_t) -> float16x8x2_t {
-    static_assert_uimm_bits!(LANE, 3);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8f16.p0")]
-        fn _vld2q_lane_f16(
-            ptr: *const f16,
-            a: float16x8_t,
-            b: float16x8_t,
-            n: i32,
-            size: i32,
-        ) -> float16x8x2_t;
-    }
-    _vld2q_lane_f16(a as _, b.0, b.1, LANE, 2)
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld2, LANE = 0)
+    assert_instr(ld)
 )]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld2_lane_f16<const LANE: i32>(a: *const f16, b: float16x4x2_t) -> float16x4x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2lane.v4f16.p0"
-        )]
-        fn _vld2_lane_f16(a: float16x4_t, b: float16x4_t, n: i64, ptr: *const f16)
-            -> float16x4x2_t;
-    }
-    _vld2_lane_f16(b.0, b.1, LANE as i64, a as _)
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t {
+    crate::ptr::read_unaligned(a.cast())
 }
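The `_x2`/`_x3`/`_x4` loaders are now plain unaligned struct reads rather than calls into LLVM's load intrinsics: the tuple types are aggregates of contiguous vectors, so reading the whole struct with `ptr::read_unaligned` is the same memory access, and instruction selection is left to the backend (the `assert_instr(vld)`/`assert_instr(ld)` checks above only pin the mnemonic prefix). A hedged sketch of the equivalence being relied on (the function name `two_ways` is illustrative):

```rust
// Sketch only: both expressions perform the same 16-byte unaligned load;
// poly64x1x2_t is two 8-byte vectors laid out back to back in memory.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon,aes")]
unsafe fn two_ways(p: *const u64) {
    use core::arch::aarch64::{poly64x1x2_t, vld1_p64_x2};
    let a: poly64x1x2_t = vld1_p64_x2(p); // the intrinsic from this patch
    let b: poly64x1x2_t = core::ptr::read_unaligned(p.cast()); // its new body
    let _ = (a, b);
}
```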
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld2, LANE = 0)
+    assert_instr(ld)
 )]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld2q_lane_f16<const LANE: i32>(a: *const f16, b: float16x8x2_t) -> float16x8x2_t {
-    static_assert_uimm_bits!(LANE, 3);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2lane.v8f16.p0"
-        )]
-        fn _vld2q_lane_f16(
-            a: float16x8_t,
-            b: float16x8_t,
-            n: i64,
-            ptr: *const f16,
-        ) -> float16x8x2_t;
-    }
-    _vld2q_lane_f16(b.0, b.1, LANE as i64, a as _)
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld2_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x2_t) -> float32x2x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0"
-        )]
-        fn _vld2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *const i8) -> float32x2x2_t;
-    }
-    _vld2_lane_f32(b.0, b.1, LANE as i64, a as _)
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld2q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x2_t) -> float32x4x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0"
-        )]
-        fn _vld2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *const i8)
-            -> float32x4x2_t;
-    }
-    _vld2q_lane_f32(b.0, b.1, LANE as i64, a as _)
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
+pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
+    const ALIGN: i32 = crate::mem::align_of::<i8>() as i32;
+    vld1_v8i8::<ALIGN>(ptr as *const i8)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld2_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x2_t) -> int8x8x2_t {
-    static_assert_uimm_bits!(LANE, 3);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0"
-        )]
-        fn _vld2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *const i8) -> int8x8x2_t;
-    }
-    _vld2_lane_s8(b.0, b.1, LANE as i64, a as _)
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))]
+pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
+    const ALIGN: i32 = crate::mem::align_of::<i8>() as i32;
+    vld1q_v16i8::<ALIGN>(ptr as *const i8)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld2_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x2_t) -> int16x4x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0"
-        )]
-        fn _vld2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *const i8) -> int16x4x2_t;
-    }
-    _vld2_lane_s16(b.0, b.1, LANE as i64, a as _)
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
+pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
+    const ALIGN: i32 = crate::mem::align_of::<i16>() as i32;
+    vld1_v4i16::<ALIGN>(ptr as *const i8)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld2q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x2_t) -> int16x8x2_t {
-    static_assert_uimm_bits!(LANE, 3);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0"
-        )]
-        fn _vld2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *const i8) -> int16x8x2_t;
-    }
-    _vld2q_lane_s16(b.0, b.1, LANE as i64, a as _)
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))]
+pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
+    const ALIGN: i32 = crate::mem::align_of::<i16>() as i32;
+    vld1q_v8i16::<ALIGN>(ptr as *const i8)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld2_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x2_t) -> int32x2x2_t {
-    static_assert_uimm_bits!(LANE, 1);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0"
-        )]
-        fn _vld2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *const i8) -> int32x2x2_t;
-    }
-    _vld2_lane_s32(b.0, b.1, LANE as i64, a as _)
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))]
+pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
+    const ALIGN: i32 = crate::mem::align_of::<i32>() as i32;
+    vld1_v2i32::<ALIGN>(ptr as *const i8)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld2q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x2_t) -> int32x4x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0"
-        )]
-        fn _vld2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *const i8) -> int32x4x2_t;
-    }
-    _vld2q_lane_s32(b.0, b.1, LANE as i64, a as _)
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))]
+pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
+    const ALIGN: i32 = crate::mem::align_of::<i32>() as i32;
+    vld1q_v4i32::<ALIGN>(ptr as *const i8)
 }
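On the `arm` side the basic `vld1_*` loads still route through the LLVM `llvm.arm.neon.vld1.*` intrinsics, but the alignment argument is now derived from the element type as a `const ALIGN: i32` passed to the internal `vld1*_v*` helpers, instead of being hard-coded per function. Only the element's natural alignment is claimed, never the whole vector's, which is what makes these loads valid on element-aligned data. A tiny compile-time check of the values being passed (illustrative only):

```rust
// The ALIGN constants the generated wrappers pass down are the element
// alignment in bytes, e.g. 2 for i16 lanes and 8 for i64 lanes.
const _: () = assert!(core::mem::align_of::<i16>() as i32 == 2);
const _: () = assert!(core::mem::align_of::<i64>() as i32 == 8);
```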
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld2_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x2_t) -> float32x2x2_t {
-    static_assert_uimm_bits!(LANE, 1);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0")]
-        fn _vld2_lane_f32(
-            ptr: *const i8,
-            a: float32x2_t,
-            b: float32x2_t,
-            n: i32,
-            size: i32,
-        ) -> float32x2x2_t;
-    }
-    _vld2_lane_f32(a as _, b.0, b.1, LANE, 4)
-}
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld2q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x2_t) -> float32x4x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0")]
-        fn _vld2q_lane_f32(
-            ptr: *const i8,
-            a: float32x4_t,
-            b: float32x4_t,
-            n: i32,
-            size: i32,
-        ) -> float32x4x2_t;
-    }
-    _vld2q_lane_f32(a as _, b.0, b.1, LANE, 4)
-}
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld2q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x2_t) -> int16x8x2_t {
-    static_assert_uimm_bits!(LANE, 3);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0")]
-        fn _vld2q_lane_s16(
-            ptr: *const i8,
-            a: int16x8_t,
-            b: int16x8_t,
-            n: i32,
-            size: i32,
-        ) -> int16x8x2_t;
-    }
-    _vld2q_lane_s16(a as _, b.0, b.1, LANE, 2)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))]
+pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
+    const ALIGN: i32 = crate::mem::align_of::<i64>() as i32;
+    vld1_v1i64::<ALIGN>(ptr as *const i8)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld2q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x2_t) -> int32x4x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0")]
-        fn _vld2q_lane_s32(
-            ptr: *const i8,
-            a: int32x4_t,
-            b: int32x4_t,
-            n: i32,
-            size: i32,
-        ) -> int32x4x2_t;
-    }
-    _vld2q_lane_s32(a as _, b.0, b.1, LANE, 4)
-}
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
 #[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld2_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x2_t) -> int8x8x2_t {
-    static_assert_uimm_bits!(LANE, 3);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0")]
-        fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32)
-            -> int8x8x2_t;
-    }
-    _vld2_lane_s8(a as _, b.0, b.1, LANE, 1)
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.64"))]
+pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
+    const ALIGN: i32 = crate::mem::align_of::<i64>() as i32;
+    vld1q_v2i64::<ALIGN>(ptr as *const i8)
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld2_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x2_t) -> int16x4x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0")]
-        fn _vld2_lane_s16(
-            ptr: *const i8,
-            a: int16x4_t,
-            b: int16x4_t,
-            n: i32,
-            size: i32,
-        ) -> int16x4x2_t;
-    }
-    _vld2_lane_s16(a as _, b.0, b.1, LANE, 2)
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld2_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x2_t) -> int32x2x2_t {
-    static_assert_uimm_bits!(LANE, 1);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0")]
-        fn _vld2_lane_s32(
-            ptr: *const i8,
-            a: int32x2_t,
-            b: int32x2_t,
-            n: i32,
-            size: i32,
-        ) -> int32x2x2_t;
-    }
-    _vld2_lane_s32(a as _, b.0, b.1, LANE, 4)
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t {
+    crate::ptr::read_unaligned(a.cast())
 }
-#[doc = "Load multiple 2-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)"]
+#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld2, LANE = 0)
+    assert_instr(ld)
 )]
-#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -19329,23 +20083,21 @@ pub unsafe fn vld2_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x2_t) -> i
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld2_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t {
-    static_assert_uimm_bits!(LANE, 3);
-    transmute(vld2_lane_s8::<LANE>(transmute(a), transmute(b)))
+pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t {
+    crate::ptr::read_unaligned(a.cast())
 }
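Functionally, an `_x4` load just pulls four registers' worth of consecutive elements from one base pointer. A short usage sketch (the buffer and function name `load_four` are illustrative, not from this patch):

```rust
// Illustrative only: split 32 consecutive bytes into four int8x8_t vectors.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn load_four(buf: &[i8; 32]) -> core::arch::aarch64::int8x8x4_t {
    use core::arch::aarch64::vld1_s8_x4;
    // Fields .0 through .3 hold elements 0..8, 8..16, 16..24, and 24..32.
    vld1_s8_x4(buf.as_ptr())
}
```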
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld2_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(ld) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19379,23 +20129,21 @@ pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld2q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(ld) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19404,23 +20152,21 @@ pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld2_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)"] #[doc = "## 
Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(ld) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19429,23 +20175,21 @@ pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld2q_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(ld) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19454,23 +20198,21 @@ pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld2_lane_s8::(transmute(a), transmute(b))) +pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(ld) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19479,23 +20221,21 @@ pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> pol target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn 
vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld2_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2, LANE = 0) + assert_instr(ld) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -19504,21 +20244,20 @@ pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld2q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p64)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -19528,55 +20267,66 @@ pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { - transmute(vld2_s64(transmute(a))) +pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", 
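Unlike the removed `vld2` loads, which de-interleave even and odd elements across the two result vectors, the `_x2` loads added here are non-interleaving: element `i` of field `.1` is simply element `8 + i` of the source buffer. A quick sketch of that indexing (illustrative names only):

```rust
// Illustrative only: vld1q_s16_x2 loads lanes in memory order, with no
// de-interleaving: field .0 = buf[0..8], field .1 = buf[8..16].
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn halves(
    buf: &[i16; 16],
) -> (core::arch::aarch64::int16x8_t, core::arch::aarch64::int16x8_t) {
    use core::arch::aarch64::vld1q_s16_x2;
    let pair = vld1q_s16_x2(buf.as_ptr());
    (pair.0, pair.1)
}
```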
issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v1i64")] - fn _vld2_s64(ptr: *const i8, size: i32) -> int64x1x2_t; - } - _vld2_s64(a as *const i8, 8) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld2.v1i64.p0" - )] - fn _vld2_s64(ptr: *const int64x1_t) -> int64x1x2_t; - } - _vld2_s64(a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u64)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -19586,20 +20336,20 @@ pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { - transmute(vld2_s64(transmute(a))) +pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load 
multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -19609,20 +20359,20 @@ pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { - transmute(vld2_s8(transmute(a))) +pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -19632,20 +20382,20 @@ pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { - transmute(vld2q_s8(transmute(a))) +pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -19655,20 +20405,20 @@ pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { - transmute(vld2_s16(transmute(a))) +pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { + 
crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -19678,20 +20428,20 @@ pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { - transmute(vld2q_s16(transmute(a))) +pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -19701,20 +20451,20 @@ pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { - transmute(vld2_s32(transmute(a))) +pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -19724,20 +20474,20 @@ pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { - transmute(vld2q_s32(transmute(a))) 
+pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -19747,20 +20497,20 @@ pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { - transmute(vld2_s8(transmute(a))) +pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -19770,20 +20520,20 @@ pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { - transmute(vld2q_s8(transmute(a))) +pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -19793,20 +20543,20 @@ pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2_p16(a: *const p16) -> 
poly16x4x2_t { - transmute(vld2_s16(transmute(a))) +pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 2-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld2) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -19816,405 +20566,43 @@ pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { - transmute(vld2q_s16(transmute(a))) +pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f16.p0")] - fn _vld3_dup_f16(ptr: *const f16, size: i32) -> float16x4x3_t; - } - _vld3_dup_f16(a as _, 2) -} -#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8f16.p0")] - fn _vld3q_dup_f16(ptr: *const f16, size: i32) -> float16x8x3_t; - } - _vld3q_dup_f16(a as _, 2) -} -#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4f16.p0" - )] - fn _vld3_dup_f16(ptr: *const f16) -> float16x4x3_t; - } - _vld3_dup_f16(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8f16.p0" - )] - fn _vld3q_dup_f16(ptr: *const f16) -> float16x8x3_t; - } - _vld3q_dup_f16(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2f32.p0" - )] - fn _vld3_dup_f32(ptr: *const f32) -> float32x2x3_t; - } - _vld3_dup_f32(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4f32.p0" - )] - fn _vld3q_dup_f32(ptr: *const f32) -> float32x4x3_t; - } - _vld3q_dup_f32(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8i8.p0" - )] - fn _vld3_dup_s8(ptr: *const i8) -> int8x8x3_t; - } - _vld3_dup_s8(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v16i8.p0" - )] - fn _vld3q_dup_s8(ptr: *const i8) -> int8x16x3_t; - } - _vld3q_dup_s8(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4i16.p0" - )] - fn _vld3_dup_s16(ptr: *const i16) -> int16x4x3_t; - } - _vld3_dup_s16(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v8i16.p0" - )] - fn _vld3q_dup_s16(ptr: *const i16) -> int16x8x3_t; - } - _vld3q_dup_s16(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v2i32.p0" - )] - fn _vld3_dup_s32(ptr: *const i32) -> 
int32x2x3_t; - } - _vld3_dup_s32(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v4i32.p0" - )] - fn _vld3q_dup_s32(ptr: *const i32) -> int32x4x3_t; - } - _vld3q_dup_s32(a as _) +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld3r))] -pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3r.v1i64.p0" - )] - fn _vld3_dup_s64(ptr: *const i64) -> int64x1x3_t; - } - _vld3_dup_s64(a as _) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0")] - fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t; - } - _vld3_dup_f32(a as *const i8, 4) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0")] - fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t; - } - _vld3q_dup_f32(a as *const i8, 4) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0")] - fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t; - } - _vld3_dup_s8(a as *const i8, 1) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0")] - fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t; - } - _vld3q_dup_s8(a as *const i8, 1) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0")] - fn _vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t; - } - _vld3_dup_s16(a as *const i8, 2) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0")] - fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t; - } - _vld3q_dup_s16(a as *const i8, 2) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0")] - fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t; - } - _vld3_dup_s32(a as *const i8, 4) -} -#[doc = 
"Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0")] - fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t; - } - _vld3q_dup_s32(a as *const i8, 4) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -20224,36 +20612,20 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { - transmute(vld3_dup_s64(transmute(a))) -} -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v1i64.p0")] - fn _vld3_dup_s64(ptr: *const i8, size: i32) -> int64x1x3_t; - } - _vld3_dup_s64(a as *const i8, 8) +pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u64)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -20263,20 +20635,20 @@ pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { - transmute(vld3_dup_s64(transmute(a))) +pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -20286,20 +20658,20 @@ pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { - transmute(vld3_dup_s8(transmute(a))) +pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -20309,20 +20681,20 @@ pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { - transmute(vld3q_dup_s8(transmute(a))) +pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) 
)] #[cfg_attr( not(target_arch = "arm"), @@ -20332,20 +20704,20 @@ pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { - transmute(vld3_dup_s16(transmute(a))) +pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -20355,20 +20727,20 @@ pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { - transmute(vld3q_dup_s16(transmute(a))) +pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -20378,20 +20750,20 @@ pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { - transmute(vld3_dup_s32(transmute(a))) +pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -20401,20 +20773,20 @@ pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { - transmute(vld3q_dup_s32(transmute(a))) +pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -20424,20 +20796,20 @@ pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { - transmute(vld3_dup_s8(transmute(a))) +pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -20447,20 +20819,20 @@ pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { - transmute(vld3q_dup_s8(transmute(a))) +pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)"] #[doc = "## Safety"] #[doc = " 
* Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -20470,20 +20842,20 @@ pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { - transmute(vld3_dup_s16(transmute(a))) +pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3r) + assert_instr(ld) )] #[cfg_attr( not(target_arch = "arm"), @@ -20493,361 +20865,760 @@ pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { - transmute(vld3q_dup_s16(transmute(a))) +pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3_f16(a: *const f16) -> float16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f16.p0")] - fn _vld3_f16(ptr: *const f16, size: i32) -> float16x4x3_t; - } - _vld3_f16(a as _, 2) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_u32_x3(a: *const 
u32) -> uint32x2x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3q_f16(a: *const f16) -> float16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8f16.p0")] - fn _vld3q_f16(ptr: *const f16, size: i32) -> float16x8x3_t; - } - _vld3q_f16(a as _, 2) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld) )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3_f16(a: *const f16) -> float16x4x3_t { - crate::core_arch::macros::deinterleaving_load!(f16, 4, 3, a) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] 
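
(Editor's note, not part of the diff.) After this change each `vld1*_xN` intrinsic is a single contiguous load with no alignment requirement beyond the element type, which is presumably why the instruction assertions above are loosened from the specific `ld2`/`ld3r`/`vld2` forms to the bare `ld`/`vld` prefixes: LLVM is now free to emit `ldr`, `ldp`, or `ld1` for the `read_unaligned` body. A minimal caller-side sketch of the `vld1q_u32_x3` intrinsic being defined here, assuming an AArch64 target with `neon` available (the `load_three` helper is illustration only, not from the patch):

```rust
// Hypothetical usage of vld1q_u32_x3: load 12 consecutive u32 values
// (48 bytes) into three q-registers. No alignment beyond u32 is required,
// matching the read_unaligned implementation introduced in this diff.
#[cfg(target_arch = "aarch64")]
unsafe fn load_three(src: &[u32; 12]) -> core::arch::aarch64::uint32x4x3_t {
    core::arch::aarch64::vld1q_u32_x3(src.as_ptr())
}
```
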
-#[cfg(not(target_arch = "arm"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) + assert_instr(ld) )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3q_f16(a: *const f16) -> float16x8x3_t { - crate::core_arch::macros::deinterleaving_load!(f16, 8, 3, a) +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - crate::core_arch::macros::deinterleaving_load!(f32, 2, 3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - crate::core_arch::macros::deinterleaving_load!(f32, 4, 3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's 
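
(Editor's note, not part of the diff.) The one-line `read_unaligned` body is sound on the assumption, consistent with the Arm documentation linked throughout, that the `_xN` forms load N vectors of consecutive elements with no de-interleaving, unlike the `vld2`/`vld3` structure loads removed elsewhere in this patch. A small sketch of that property (the `check_contiguous` helper is illustrative, not from the patch):

```rust
// Hypothetical sanity check: the two d-registers returned by vld1_u8_x2
// must hold the 16 source bytes in order, because the load is a plain
// contiguous read rather than an interleaved structure load.
#[cfg(target_arch = "aarch64")]
unsafe fn check_contiguous(src: &[u8; 16]) {
    use core::arch::aarch64::vld1_u8_x2;
    let pair = vld1_u8_x2(src.as_ptr());
    let lo: [u8; 8] = core::mem::transmute(pair.0);
    let hi: [u8; 8] = core::mem::transmute(pair.1);
    assert_eq!(lo.as_slice(), &src[..8]);
    assert_eq!(hi.as_slice(), &src[8..]);
}
```
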
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - crate::core_arch::macros::deinterleaving_load!(i8, 8, 3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - crate::core_arch::macros::deinterleaving_load!(i8, 16, 3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - crate::core_arch::macros::deinterleaving_load!(i16, 4, 3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + 
target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - crate::core_arch::macros::deinterleaving_load!(i16, 8, 3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { - crate::core_arch::macros::deinterleaving_load!(i32, 2, 3, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3))] -pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { - crate::core_arch::macros::deinterleaving_load!(i32, 4, 3, a) -} -#[doc = "Load multiple 3-element structures to three 
registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0")] - fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; - } - _vld3_f32(a as *const i8, 4) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0")] - fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; - } - _vld3q_f32(a as *const i8, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0")] - fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; - } - _vld3_s8(a as *const i8, 1) +#[target_feature(enable = 
"neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0")] - fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; - } - _vld3q_s8(a as *const i8, 1) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0")] - fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; - } - _vld3_s16(a as *const i8, 2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] 
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0")] - fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; - } - _vld3q_s16(a as *const i8, 2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { + crate::ptr::read_unaligned(a.cast()) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { + crate::ptr::read_unaligned(a.cast()) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic 
unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { + crate::ptr::read_unaligned(a.cast()) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { + crate::ptr::read_unaligned(a.cast()) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { + crate::ptr::read_unaligned(a.cast()) +} +#[doc = "Load multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { + crate::ptr::read_unaligned(a.cast()) +} +#[inline] +#[rustc_legacy_const_generics(1)] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { +unsafe fn 
vld1_v1i64<const ALIGN: i32>(a: *const i8) -> int64x1_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0")] - fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v1i64")] + fn _vld1_v1i64(a: *const i8, b: i32) -> int64x1_t; } - _vld3_s32(a as *const i8, 4) + _vld1_v1i64(a, ALIGN) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] #[inline] +#[rustc_legacy_const_generics(1)] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld3))] -pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { +unsafe fn vld1_v2f32<const ALIGN: i32>(a: *const i8) -> float32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0")] - fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2f32")] + fn _vld1_v2f32(a: *const i8, b: i32) -> float32x2_t; } - _vld3q_s32(a as *const i8, 4) + _vld1_v2f32(a, ALIGN) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f16)"] +#[inline] +#[rustc_legacy_const_generics(1)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v2i32<const ALIGN: i32>(a: *const i8) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i32")] + fn _vld1_v2i32(a: *const i8, b: i32) -> int32x2_t; + } + _vld1_v2i32(a, ALIGN) +} +#[inline] +#[rustc_legacy_const_generics(1)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v4i16<const ALIGN: i32>(a: *const i8) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i16")] + fn _vld1_v4i16(a: *const i8, b: i32) -> int16x4_t; + } + _vld1_v4i16(a, ALIGN) +} +#[inline] +#[rustc_legacy_const_generics(1)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1_v8i8<const ALIGN: i32>(a: *const i8) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i8")] + fn _vld1_v8i8(a: *const i8, b: i32) -> int8x8_t; + } + _vld1_v8i8(a, ALIGN) +} +#[inline] +#[rustc_legacy_const_generics(1)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v16i8<const ALIGN: i32>(a: *const i8) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v16i8")] + fn _vld1q_v16i8(a: *const i8, b:
i32) -> int8x16_t; + } + _vld1q_v16i8(a, ALIGN) +} +#[inline] +#[rustc_legacy_const_generics(1)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v2i64<const ALIGN: i32>(a: *const i8) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v2i64")] + fn _vld1q_v2i64(a: *const i8, b: i32) -> int64x2_t; + } + _vld1q_v2i64(a, ALIGN) +} +#[inline] +#[rustc_legacy_const_generics(1)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v4f32<const ALIGN: i32>(a: *const i8) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f32")] + fn _vld1q_v4f32(a: *const i8, b: i32) -> float32x4_t; + } + _vld1q_v4f32(a, ALIGN) +} +#[inline] +#[rustc_legacy_const_generics(1)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v4i32<const ALIGN: i32>(a: *const i8) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4i32")] + fn _vld1q_v4i32(a: *const i8, b: i32) -> int32x4_t; + } + _vld1q_v4i32(a, ALIGN) +} +#[inline] +#[rustc_legacy_const_generics(1)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8", ALIGN = 0))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +unsafe fn vld1q_v8i16<const ALIGN: i32>(a: *const i8) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8i16")] + fn _vld1q_v8i16(a: *const i8, b: i32) -> int16x8_t; + } + _vld1q_v8i16(a, ALIGN) +} +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg(not(target_arch = "arm64ec"))] +unsafe fn vld1_v4f16(a: *const i8, b: i32) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v4f16")] + fn _vld1_v4f16(a: *const i8, b: i32) -> float16x4_t; + } + _vld1_v4f16(a, b) +} +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg(not(target_arch = "arm64ec"))] +unsafe fn vld1q_v8f16(a: *const i8, b: i32) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1.v8f16")] + fn _vld1q_v8f16(a: *const i8, b: i32) -> float16x8_t; + } + _vld1q_v8f16(a, b) +} +#[doc = "Load one single-element structure and Replicate to all lanes (of one register)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] +#[target_feature(enable = "neon,aes")]
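
On 32-bit Arm the private `vld1_v*` helpers above still bind the `llvm.arm.neon.vld1.*` intrinsics, but the alignment operand is now a const generic that `#[rustc_legacy_const_generics(1)]` re-exposes to callers as a trailing argument (that attribute is internal to the standard library and not usable outside it). A compilable sketch of the shape, with `llvm_vld1_v8i8` as a hypothetical stand-in for the real LLVM link:

use core::ptr;

// Hypothetical stand-in for the llvm.arm.neon.vld1.v8i8 binding; a plain
// unaligned load approximates it well enough for illustration.
unsafe fn llvm_vld1_v8i8(a: *const i8, _align: i32) -> [i8; 8] {
    unsafe { ptr::read_unaligned(a.cast()) }
}

// Shape of the vld1_v* helpers: the alignment hint is a const generic,
// forwarded as the trailing i32 operand of the intrinsic.
unsafe fn vld1_v8i8_sketch<const ALIGN: i32>(a: *const i8) -> [i8; 8] {
    unsafe { llvm_vld1_v8i8(a, ALIGN) }
}

fn main() {
    let bytes = [0i8, 1, 2, 3, 4, 5, 6, 7];
    // ALIGN = 0 matches the value the assert_instr tests above exercise.
    let v = unsafe { vld1_v8i8_sketch::<0>(bytes.as_ptr()) };
    assert_eq!(v, bytes);
}
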
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld1r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t { + let x = vld1q_lane_p64::<0>(ptr, transmute(u64x2::splat(0))); + simd_shuffle!(x, x, [0, 0]) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> float16x4x3_t { - static_assert_uimm_bits!(LANE, 2); +pub unsafe fn vld2_dup_f16(a: *const f16) -> float16x4x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f16.p0")] - fn _vld3_lane_f16( - ptr: *const f16, - a: float16x4_t, - b: float16x4_t, - c: float16x4_t, - n: i32, - size: i32, - ) -> float16x4x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f16.p0")] + fn _vld2_dup_f16(ptr: *const f16, size: i32) -> float16x4x2_t; } - _vld3_lane_f16(a as _, b.0, b.1, b.2, LANE, 2) + _vld2_dup_f16(a as _, 2) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) -> float16x8x3_t { - static_assert_uimm_bits!(LANE, 3); +pub unsafe fn vld2q_dup_f16(a: *const f16) -> float16x8x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8f16.p0")] - fn _vld3q_lane_f16( - ptr: *const f16, - a: float16x8_t, - b: float16x8_t, - c: float16x8_t, - n: i32, - size: i32, - ) -> float16x8x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8f16.p0")] + fn _vld2q_dup_f16(ptr: *const f16, size: i32) -> float16x8x2_t; } - 
_vld3q_lane_f16(a as _, b.0, b.1, b.2, LANE, 2) + _vld2q_dup_f16(a as _, 2) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] @@ -20855,31 +21626,23 @@ pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) - #[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> float16x4x3_t { - static_assert_uimm_bits!(LANE, 2); +pub unsafe fn vld2_dup_f16(a: *const f16) -> float16x4x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4f16.p0" + link_name = "llvm.aarch64.neon.ld2r.v4f16.p0" )] - fn _vld3_lane_f16( - a: float16x4_t, - b: float16x4_t, - c: float16x4_t, - n: i64, - ptr: *const f16, - ) -> float16x4x3_t; + fn _vld2_dup_f16(ptr: *const f16) -> float16x4x2_t; } - _vld3_lane_f16(b.0, b.1, b.2, LANE as i64, a as _) + _vld2_dup_f16(a as _) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] @@ -20887,381 +21650,371 @@ pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> #[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) -> float16x8x3_t { - static_assert_uimm_bits!(LANE, 3); +pub unsafe fn vld2q_dup_f16(a: *const f16) -> float16x8x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8f16.p0" + link_name = "llvm.aarch64.neon.ld2r.v8f16.p0" )] - fn _vld3q_lane_f16( - a: float16x8_t, - b: float16x8_t, - c: float16x8_t, - n: i64, - ptr: *const f16, - ) -> float16x8x3_t; + fn _vld2q_dup_f16(ptr: *const f16) -> float16x8x2_t; } - _vld3q_lane_f16(b.0, b.1, b.2, LANE as i64, a as _) + _vld2q_dup_f16(a as _) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { - static_assert_uimm_bits!(LANE, 1); +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0" - )] - fn _vld3_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i64, - ptr: *const i8, - ) -> float32x2x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0")] + fn _vld2_dup_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } - _vld3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) + _vld2_dup_f32(a as *const i8, 4) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { - static_assert_uimm_bits!(LANE, 2); +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0" - )] - fn _vld3q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - n: i64, - ptr: *const i8, - ) -> float32x4x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0")] + fn _vld2q_dup_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } - _vld3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) + _vld2q_dup_f32(a as *const i8, 4) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_f32(a: *const f32, b: 
float32x2x3_t) -> float32x2x3_t { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0")] - fn _vld3_lane_f32( - ptr: *const i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i32, - size: i32, - ) -> float32x2x3_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0")] + fn _vld2_dup_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } - _vld3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) + _vld2_dup_s8(a as *const i8, 1) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0")] + fn _vld2q_dup_s8(ptr: *const i8, size: i32) -> int8x16x2_t; + } + _vld2q_dup_s8(a as *const i8, 1) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0")] + fn _vld2_dup_s16(ptr: *const i8, size: i32) -> int16x4x2_t; + } + _vld2_dup_s16(a as *const i8, 2) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0")] + fn _vld2q_dup_s16(ptr: *const i8, size: i32) -> int16x8x2_t; + } + _vld2q_dup_s16(a as *const i8, 2) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0")] + fn _vld2_dup_s32(ptr: *const i8, size: i32) -> int32x2x2_t; + } + _vld2_dup_s32(a as *const i8, 4) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0")] + fn _vld2q_dup_s32(ptr: *const i8, size: i32) -> int32x4x2_t; + } + _vld2q_dup_s32(a as *const i8, 4) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0" + link_name = "llvm.aarch64.neon.ld2r.v2f32.p0" )] - fn _vld3_lane_s8( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - n: i64, - ptr: *const i8, - ) -> int8x8x3_t; + fn _vld2_dup_f32(ptr: *const f32) -> float32x2x2_t; } - _vld3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) + _vld2_dup_f32(a as _) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0" + link_name = "llvm.aarch64.neon.ld2r.v4f32.p0" )] - fn _vld3_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i64, - ptr: *const i8, - ) -> int16x4x3_t; + fn _vld2q_dup_f32(ptr: *const f32) -> float32x4x2_t; 
} - _vld3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) + _vld2q_dup_f32(a as _) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { - static_assert_uimm_bits!(LANE, 4); +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0" + link_name = "llvm.aarch64.neon.ld2r.v8i8.p0" )] - fn _vld3q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i64, - ptr: *const i8, - ) -> int16x8x3_t; + fn _vld2_dup_s8(ptr: *const i8) -> int8x8x2_t; } - _vld3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) + _vld2_dup_s8(a as _) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0" + link_name = "llvm.aarch64.neon.ld2r.v16i8.p0" )] - fn _vld3_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i64, - ptr: *const i8, - ) -> int32x2x3_t; + fn _vld2q_dup_s8(ptr: *const i8) -> int8x16x2_t; } - _vld3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) + _vld2q_dup_s8(a as _) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn 
vld2_dup_s16(a: *const i16) -> int16x4x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0" + link_name = "llvm.aarch64.neon.ld2r.v4i16.p0" )] - fn _vld3q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i64, - ptr: *const i8, - ) -> int32x4x3_t; + fn _vld2_dup_s16(ptr: *const i16) -> int16x4x2_t; } - _vld3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) + _vld2_dup_s16(a as _) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { - static_assert_uimm_bits!(LANE, 3); +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0")] - fn _vld3_lane_s8( - ptr: *const i8, - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - n: i32, - size: i32, - ) -> int8x8x3_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v8i16.p0" + )] + fn _vld2q_dup_s16(ptr: *const i16) -> int16x8x2_t; } - _vld3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) + _vld2q_dup_s16(a as _) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { - static_assert_uimm_bits!(LANE, 2); +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0")] - fn _vld3_lane_s16( - ptr: *const i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - n: i32, - size: i32, - ) -> int16x4x3_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v2i32.p0" + )] + fn _vld2_dup_s32(ptr: *const i32) -> int32x2x2_t; } - _vld3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) + _vld2_dup_s32(a as _) } -#[doc = "Load multiple 3-element structures 
to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { - static_assert_uimm_bits!(LANE, 3); +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0")] - fn _vld3q_lane_s16( - ptr: *const i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - n: i32, - size: i32, - ) -> int16x8x3_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v4i32.p0" + )] + fn _vld2q_dup_s32(ptr: *const i32) -> int32x4x2_t; } - _vld3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) + _vld2q_dup_s32(a as _) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2r) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { + transmute(vld2_dup_s64(transmute(a))) +} +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0")] - fn _vld3_lane_s32( - ptr: *const i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - n: i32, - size: i32, - ) -> int32x2x3_t; + #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vld2dup.v1i64.p0")] + fn _vld2_dup_s64(ptr: *const i8, size: i32) -> int64x1x2_t; } - _vld3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) + _vld2_dup_s64(a as *const i8, 8) } -#[doc = "Load multiple 3-element structures to two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { - static_assert_uimm_bits!(LANE, 2); +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(ld2r))] +pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0")] - fn _vld3q_lane_s32( - ptr: *const i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - n: i32, - size: i32, - ) -> int32x4x3_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2r.v1i64.p0" + )] + fn _vld2_dup_s64(ptr: *const i64) -> int64x1x2_t; } - _vld3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) + _vld2_dup_s64(a as _) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21270,23 +22023,21 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3_lane_s8::(transmute(a), transmute(b))) +pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { + transmute(vld2_dup_s64(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21295,23 +22046,21 @@ pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uin target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld3_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { + transmute(vld2_dup_s8(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21320,23 +22069,21 @@ pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { + transmute(vld2q_dup_s8(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21345,23 +22092,21 @@ pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_u32(a: *const u32, b: 
uint32x2x3_t) -> uint32x2x3_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld3_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { + transmute(vld2_dup_s16(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21370,23 +22115,21 @@ pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld3q_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { + transmute(vld2q_dup_s16(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21395,23 +22138,21 @@ pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3_lane_s8::(transmute(a), transmute(b))) +pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { + transmute(vld2_dup_s32(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] 
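
One more pattern worth noting from earlier in this hunk: `vld1q_dup_p64` builds its splat without binding any replicate-load intrinsic, by loading lane 0 of a zeroed vector via `vld1q_lane_p64::<0>` and then broadcasting with `simd_shuffle!(x, x, [0, 0])`; the `assert_instr(ld1r)` on it checks that the backend folds the pair into a single replicate load. A scalar model of that sequence, with a plain `[u64; 2]` standing in for `poly64x2_t`:

use core::ptr;

// Scalar model of the vld1q_dup_p64 body: lane load, then [0, 0] shuffle.
unsafe fn vld1q_dup_sketch(p: *const u64) -> [u64; 2] {
    let mut x = [0u64; 2]; // transmute(u64x2::splat(0))
    x[0] = unsafe { ptr::read_unaligned(p) }; // vld1q_lane_p64::<0>(p, x)
    [x[0], x[0]] // simd_shuffle!(x, x, [0, 0])
}

fn main() {
    let v = 0xdead_beef_u64;
    assert_eq!(unsafe { vld1q_dup_sketch(&v) }, [v, v]);
}
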
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21420,23 +22161,21 @@ pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> pol target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld3_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { + transmute(vld2q_dup_s32(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3, LANE = 0) + assert_instr(ld2r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -21445,21 +22184,20 @@ pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld3q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { + transmute(vld2_dup_s8(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)"] +#[doc = "Load single 2-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld2r) )] #[cfg_attr( not(target_arch = "arm"), @@ -21469,232 +22207,20 @@ pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { - transmute(vld3_s64(transmute(a))) +pub 
unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { + transmute(vld2q_dup_s8(transmute(a))) } -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { - crate::ptr::read_unaligned(a.cast()) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v1i64.p0")] - fn _vld3_s64(ptr: *const i8, size: i32) -> int64x1x3_t; - } - _vld3_s64(a as *const i8, 8) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { - transmute(vld3_s64(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { - transmute(vld3_s8(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = 
"1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { - transmute(vld3q_s8(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { - transmute(vld3_s16(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { - transmute(vld3q_s16(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { - transmute(vld3_s32(transmute(a))) -} -#[doc = "Load multiple 3-element structures to three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld3) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { - transmute(vld3q_s32(transmute(a))) -} -#[doc = "Load multiple 
3-element structures to three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld3)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t {
-    transmute(vld3_s8(transmute(a)))
-}
-#[doc = "Load multiple 3-element structures to three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"]
+#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld3)
+    assert_instr(ld2r)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -21704,20 +22230,20 @@ pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t {
-    transmute(vld3q_s8(transmute(a)))
+pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t {
+    transmute(vld2_dup_s16(transmute(a)))
 }
-#[doc = "Load multiple 3-element structures to three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"]
+#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld3)
+    assert_instr(ld2r)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -21727,1533 +22253,1704 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t {
-    transmute(vld3_s16(transmute(a)))
+pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t {
+    transmute(vld2q_dup_s16(transmute(a)))
 }
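
For context on what these `_dup` loads do: `vld2_dup_*` reads one 2-element structure from memory and broadcasts element 0 into every lane of the first result register and element 1 into every lane of the second. An illustrative sketch (ours, not part of the patch; assumes a NEON-capable aarch64 target and a valid, readable pointer; `broadcast_pair` and `first_of_each` are our names):

use core::arch::aarch64::*;

// Broadcast-load one (a, b) pair: returns ([a; 4], [b; 4]).
unsafe fn broadcast_pair(ptr: *const u16) -> uint16x4x2_t {
    vld2_dup_u16(ptr)
}

// After a dup load every lane holds the same value, so lane 0 is
// representative of each whole register.
unsafe fn first_of_each(ptr: *const u16) -> (u16, u16) {
    let v = broadcast_pair(ptr);
    (vget_lane_u16::<0>(v.0), vget_lane_u16::<0>(v.1))
}
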
-#[doc = "Load multiple 3-element structures to three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"]
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld3)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t {
-    transmute(vld3q_s16(transmute(a)))
-}
-#[doc = "Load multiple 3-element structures to three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld3q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x3_t) -> float32x4x3_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")]
-        fn _vld3q_lane_f32(
-            ptr: *const i8,
-            a: float32x4_t,
-            b: float32x4_t,
-            c: float32x4_t,
-            n: i32,
-            size: i32,
-        ) -> float32x4x3_t;
-    }
-    _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4)
-}
-#[doc = "Load single 4-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg(target_arch = "arm")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
-#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t {
+pub unsafe fn vld2_f16(a: *const f16) -> float16x4x2_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f16.p0")]
-        fn _vld4_dup_f16(ptr: *const f16, size: i32) -> float16x4x4_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f16.p0")]
+        fn _vld2_f16(ptr: *const f16, size: i32) -> float16x4x2_t;
     }
-    _vld4_dup_f16(a as _, 2)
+    _vld2_f16(a as _, 2)
 }
-#[doc = "Load single 4-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"]
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
+#[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg(target_arch = "arm")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
-#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t {
+pub unsafe fn vld2q_f16(a: *const f16) -> float16x8x2_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8f16.p0")]
-        fn _vld4q_dup_f16(ptr: *const f16, size: i32) -> float16x8x4_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8f16.p0")]
+        fn _vld2q_f16(ptr: *const f16, size: i32) -> float16x8x2_t;
     }
-    _vld4q_dup_f16(a as _, 2)
+    _vld2q_f16(a as _, 2)
 }
-#[doc = "Load single 4-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"]
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
+#[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld4r)
+    assert_instr(ld2)
 )]
-#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t {
+pub unsafe fn vld2_f16(a: *const f16) -> float16x4x2_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld4r.v4f16.p0"
+            link_name = "llvm.aarch64.neon.ld2.v4f16.p0"
         )]
-        fn _vld4_dup_f16(ptr: *const f16) -> float16x4x4_t;
+        fn _vld2_f16(ptr: *const f16) -> float16x4x2_t;
     }
-    _vld4_dup_f16(a as _)
+    _vld2_f16(a as _)
 }
-#[doc = "Load single 4-element structure and replicate to all lanes of two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"]
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
+#[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld4r)
+    assert_instr(ld2)
 )]
-#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t {
+pub unsafe fn vld2q_f16(a: *const f16) -> float16x8x2_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld4r.v8f16.p0"
+            link_name = "llvm.aarch64.neon.ld2.v8f16.p0"
         )]
-        fn _vld4q_dup_f16(ptr: *const f16) -> float16x8x4_t;
+        fn _vld2q_f16(ptr: *const f16) -> float16x8x2_t;
     }
-    _vld4q_dup_f16(a as _)
+    _vld2q_f16(a as _)
 }
-#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
-#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0")] - fn _vld4_dup_f32(ptr: *const i8, size: i32) -> float32x2x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32")] + fn _vld2_f32(ptr: *const i8, size: i32) -> float32x2x2_t; } - _vld4_dup_f32(a as *const i8, 4) + _vld2_f32(a as *const i8, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0")] - fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32")] + fn _vld2q_f32(ptr: *const i8, size: i32) -> float32x4x2_t; } - _vld4q_dup_f32(a as *const i8, 4) + _vld2q_f32(a as *const i8, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0")] - fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8")] + fn _vld2_s8(ptr: *const i8, size: i32) -> int8x8x2_t; } - _vld4_dup_s8(a as *const i8, 1) + _vld2_s8(a as *const i8, 1) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0")] - fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8")] + fn _vld2q_s8(ptr: *const i8, size: i32) -> int8x16x2_t; } - _vld4q_dup_s8(a as *const i8, 1) + _vld2q_s8(a as *const i8, 1) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0")] - fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16")] + fn _vld2_s16(ptr: *const i8, size: i32) -> int16x4x2_t; } - _vld4_dup_s16(a as *const i8, 2) + _vld2_s16(a as *const i8, 2) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0")] - fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16")] + fn _vld2q_s16(ptr: *const i8, size: i32) -> int16x8x2_t; } - _vld4q_dup_s16(a as *const i8, 2) + _vld2q_s16(a as *const i8, 2) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] 
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0")] - fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32")] + fn _vld2_s32(ptr: *const i8, size: i32) -> int32x2x2_t; } - _vld4_dup_s32(a as *const i8, 4) + _vld2_s32(a as *const i8, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vld4))] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { +#[cfg_attr(test, assert_instr(vld2))] +pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0")] - fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32")] + fn _vld2q_s32(ptr: *const i8, size: i32) -> int32x4x2_t; } - _vld4q_dup_s32(a as *const i8, 4) + _vld2q_s32(a as *const i8, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2f32.p0.p0" + link_name = "llvm.aarch64.neon.ld2.v2f32.p0" )] - fn _vld4_dup_f32(ptr: *const f32) -> float32x2x4_t; + fn _vld2_f32(ptr: *const float32x2_t) -> float32x2x2_t; } - _vld4_dup_f32(a as _) + _vld2_f32(a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4f32.p0.p0" + link_name = "llvm.aarch64.neon.ld2.v4f32.p0" )] - fn _vld4q_dup_f32(ptr: *const f32) -> float32x4x4_t; + fn _vld2q_f32(ptr: *const float32x4_t) -> float32x4x2_t; } - _vld4q_dup_f32(a as _) + _vld2q_f32(a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i8.p0.p0" + link_name = "llvm.aarch64.neon.ld2.v8i8.p0" )] - fn _vld4_dup_s8(ptr: *const i8) -> int8x8x4_t; + fn _vld2_s8(ptr: *const int8x8_t) -> int8x8x2_t; } - _vld4_dup_s8(a as _) + _vld2_s8(a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v16i8.p0.p0" + link_name = "llvm.aarch64.neon.ld2.v16i8.p0" )] - fn _vld4q_dup_s8(ptr: *const i8) -> int8x16x4_t; + fn _vld2q_s8(ptr: *const int8x16_t) -> int8x16x2_t; } - _vld4q_dup_s8(a as _) + _vld2q_s8(a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i16.p0.p0" + link_name = "llvm.aarch64.neon.ld2.v4i16.p0" )] - fn _vld4_dup_s16(ptr: *const i16) -> int16x4x4_t; + fn _vld2_s16(ptr: *const int16x4_t) -> int16x4x2_t; } - _vld4_dup_s16(a as _) + _vld2_s16(a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v8i16.p0.p0" + link_name = "llvm.aarch64.neon.ld2.v8i16.p0" )] - fn _vld4q_dup_s16(ptr: *const i16) -> int16x8x4_t; + fn _vld2q_s16(ptr: *const int16x8_t) -> int16x8x2_t; } - _vld4q_dup_s16(a as _) + _vld2q_s16(a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v2i32.p0.p0" + link_name = "llvm.aarch64.neon.ld2.v2i32.p0" )] - fn _vld4_dup_s32(ptr: *const i32) -> int32x2x4_t; + fn _vld2_s32(ptr: *const int32x2_t) -> int32x2x2_t; } - _vld4_dup_s32(a as _) + _vld2_s32(a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = 
"neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { +#[cfg_attr(test, assert_instr(ld2))] +pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v4i32.p0.p0" + link_name = "llvm.aarch64.neon.ld2.v4i32.p0" )] - fn _vld4q_dup_s32(ptr: *const i32) -> int32x4x4_t; + fn _vld2q_s32(ptr: *const int32x4_t) -> int32x4x2_t; } - _vld4q_dup_s32(a as _) + _vld2q_s32(a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(ld4r))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld2_lane_f16(a: *const f16, b: float16x4x2_t) -> float16x4x2_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ld4r.v1i64.p0.p0" - )] - fn _vld4_dup_s64(ptr: *const i64) -> int64x1x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f16.p0")] + fn _vld2_lane_f16( + ptr: *const f16, + a: float16x4_t, + b: float16x4_t, + n: i32, + size: i32, + ) -> float16x4x2_t; } - _vld4_dup_s64(a as _) -} -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { - transmute(vld4_dup_s64(transmute(a))) + _vld2_lane_f16(a as _, b.0, b.1, LANE, 2) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f16)"] #[doc = "## Safety"] #[doc = 
" * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(nop))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld2q_lane_f16(a: *const f16, b: float16x8x2_t) -> float16x8x2_t { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0")] - fn _vld4_dup_s64(ptr: *const i8, size: i32) -> int64x1x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8f16.p0")] + fn _vld2q_lane_f16( + ptr: *const f16, + a: float16x8_t, + b: float16x8_t, + n: i32, + size: i32, + ) -> float16x8x2_t; } - _vld4_dup_s64(a as *const i8, 8) + _vld2q_lane_f16(a as _, b.0, b.1, LANE, 2) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u64)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ld2, LANE = 0) )] -pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { - transmute(vld4_dup_s64(transmute(a))) +#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld2_lane_f16(a: *const f16, b: float16x4x2_t) -> float16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v4f16.p0" + )] + fn _vld2_lane_f16(a: float16x4_t, b: float16x4_t, n: i64, ptr: *const f16) + -> float16x4x2_t; + } + _vld2_lane_f16(b.0, b.1, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( 
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld4r)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+    assert_instr(ld2, LANE = 0)
 )]
-pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t {
-    transmute(vld4_dup_s8(transmute(a)))
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vld2q_lane_f16<const LANE: i32>(a: *const f16, b: float16x8x2_t) -> float16x8x2_t {
+    static_assert_uimm_bits!(LANE, 3);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.ld2lane.v8f16.p0"
+        )]
+        fn _vld2q_lane_f16(
+            a: float16x8_t,
+            b: float16x8_t,
+            n: i64,
+            ptr: *const f16,
+        ) -> float16x8x2_t;
+    }
+    _vld2q_lane_f16(b.0, b.1, LANE as i64, a as _)
 }
-#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"]
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld4r)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t {
-    transmute(vld4q_dup_s8(transmute(a)))
+#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vld2_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x2_t) -> float32x2x2_t {
+    static_assert_uimm_bits!(LANE, 1);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0"
+        )]
+        fn _vld2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *const i8) -> float32x2x2_t;
+    }
+    _vld2_lane_f32(b.0, b.1, LANE as i64, a as _)
 }
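
The bit-width passed to `static_assert_uimm_bits!` follows from the lane count: `float32x2_t` has two lanes, so a valid index needs one unsigned bit (0 or 1), while the q-form `float32x4_t` allows two bits (0..=3). A stand-in for the macro's check (ours, hypothetical; `lane_fits` is not the stdarch implementation):

// Compile-time lane check in the same spirit as static_assert_uimm_bits!.
const fn lane_fits(lane: i32, bits: u32) -> bool {
    lane >= 0 && (lane as u32) < (1u32 << bits)
}

const _: () = assert!(lane_fits(1, 1)); // lane 1 of a 2-lane vector: ok
// const _: () = assert!(lane_fits(2, 1)); // 2 is out of range: build error
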
target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { - transmute(vld4_dup_s16(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0" + )] + fn _vld2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *const i8) + -> float32x4x2_t; + } + _vld2q_lane_f32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { - transmute(vld4q_dup_s16(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0" + )] + fn _vld2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *const i8) -> int8x8x2_t; + } + _vld2_lane_s8(b.0, b.1, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { - transmute(vld4_dup_s32(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] 
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vld2_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x2_t) -> int16x4x2_t {
+    static_assert_uimm_bits!(LANE, 2);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0"
+        )]
+        fn _vld2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *const i8) -> int16x4x2_t;
+    }
+    _vld2_lane_s16(b.0, b.1, LANE as i64, a as _)
 }
-#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"]
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld4r)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t {
-    transmute(vld4q_dup_s32(transmute(a)))
+#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vld2q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x2_t) -> int16x8x2_t {
+    static_assert_uimm_bits!(LANE, 3);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0"
+        )]
+        fn _vld2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *const i8) -> int16x8x2_t;
+    }
+    _vld2q_lane_s16(b.0, b.1, LANE as i64, a as _)
 }
-#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"]
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld4r)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t {
-    transmute(vld4_dup_s8(transmute(a)))
+#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
+#[rustc_legacy_const_generics(2)]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vld2_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x2_t) -> int32x2x2_t {
+    static_assert_uimm_bits!(LANE, 1);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0"
+        )]
+        fn _vld2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *const i8) -> int32x2x2_t;
+    }
+    _vld2_lane_s32(b.0, b.1, LANE as i64, a as _)
 }
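
Note the two lowering conventions visible in this hunk: the AArch64 `ld2lane` callees take the lane as an `i64` with the pointer last, while the Armv7 `vld2lane` ones further down take the pointer first plus `i32` lane and alignment/size arguments. The public Rust signature is identical on both targets, so a caller never sees the difference; a sketch of ours (assuming aarch64 here, with `patch_lane0` as our name):

use core::arch::aarch64::*;

// The same source would compile against core::arch::arm on ARMv7;
// only the selected instruction (ld2 vs vld2.16) differs.
unsafe fn patch_lane0(ptr: *const i16, acc: int16x4x2_t) -> int16x4x2_t {
    vld2_lane_s16::<0>(ptr, acc)
}
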
"arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0" + )] + fn _vld2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *const i8) -> int32x2x2_t; + } + _vld2_lane_s32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { - transmute(vld4q_dup_s8(transmute(a))) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0" + )] + fn _vld2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *const i8) -> int32x4x2_t; + } + _vld2q_lane_s32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { - transmute(vld4_dup_s16(transmute(a))) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0")] + fn _vld2_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x2_t; + } + _vld2_lane_f32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Load single 4-element structure and 
replicate to all lanes of four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4r) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { - transmute(vld4q_dup_s16(transmute(a))) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0")] + fn _vld2q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x2_t; + } + _vld2q_lane_f32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f16.p0")] - fn _vld4_f16(ptr: *const f16, size: i32) -> float16x4x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0")] + fn _vld2q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x2_t; } - _vld4_f16(a as _, 2) + _vld2q_lane_s16(a as _, b.0, b.1, LANE, 2) } -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8f16.p0")] - fn _vld4q_f16(ptr: *const f16, size: i32) -> float16x8x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0")] + fn _vld2q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x2_t; } - _vld4q_f16(a as _, 2) + _vld2q_lane_s32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { - crate::core_arch::macros::deinterleaving_load!(f16, 4, 4, a) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0")] + fn _vld2_lane_s8(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) + -> int8x8x2_t; + } + _vld2_lane_s8(a as _, b.0, b.1, LANE, 1) } -#[doc = "Load single 4-element structure and replicate to all lanes of two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { - crate::core_arch::macros::deinterleaving_load!(f16, 
8, 4, a) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0")] + fn _vld2_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x2_t; + } + _vld2_lane_s16(a as _, b.0, b.1, LANE, 2) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0")] + fn _vld2_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x2_t; + } + _vld2_lane_s32(a as _, b.0, b.1, LANE, 4) +} +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - crate::core_arch::macros::deinterleaving_load!(f32, 2, 4, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld2_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { - 
crate::core_arch::macros::deinterleaving_load!(f32, 4, 4, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld2_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { - crate::core_arch::macros::deinterleaving_load!(i8, 8, 4, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld2q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - crate::core_arch::macros::deinterleaving_load!(i8, 16, 4, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld2_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Load multiple 4-element structures to 
four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - crate::core_arch::macros::deinterleaving_load!(i16, 4, 4, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld2q_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - crate::core_arch::macros::deinterleaving_load!(i16, 8, 4, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld2_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { - crate::core_arch::macros::deinterleaving_load!(i32, 2, 4, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld2_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(ld4))] -pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { - crate::core_arch::macros::deinterleaving_load!(i32, 4, 4, a) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld2q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0")] - fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t; - } - _vld4_f32(a as *const i8, 4) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { + transmute(vld2_s64(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0")] - fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v1i64")] + fn _vld2_s64(ptr: *const i8, size: i32) -> int64x1x2_t; } - _vld4q_f32(a as *const i8, 4) + _vld2_s64(a as *const i8, 8) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0")] - fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld2.v1i64.p0" + )] + fn _vld2_s64(ptr: *const int64x1_t) -> int64x1x2_t; } - _vld4_s8(a as *const i8, 1) + _vld2_s64(a as _) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0")] - fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t; - } - _vld4q_s8(a as *const i8, 1) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + 
target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { + transmute(vld2_s64(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0")] - fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t; - } - _vld4_s16(a as *const i8, 2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { + transmute(vld2_s8(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vld4))] -pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0")] - fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t; - } - _vld4q_s16(a as *const i8, 2) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { + transmute(vld2q_s8(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] +#[doc = "Load multiple 2-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] 
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld4))]
-pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0")]
-        fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t;
-    }
-    _vld4_s32(a as *const i8, 4)
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld2)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t {
+    transmute(vld2_s16(transmute(a)))
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"]
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vld4))]
-pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0")]
-        fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t;
-    }
-    _vld4q_s32(a as *const i8, 4)
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld2)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t {
+    transmute(vld2q_s16(transmute(a)))
 }
-#[doc = "Load multiple 4-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f16)"]
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld2)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t {
+    transmute(vld2_s32(transmute(a)))
+}
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld2)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t {
+    transmute(vld2q_s32(transmute(a)))
+}
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld2)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t {
+    transmute(vld2_s8(transmute(a)))
+}
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld2)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t {
+    transmute(vld2q_s8(transmute(a)))
+}
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld2)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t {
+    transmute(vld2_s16(transmute(a)))
+}
+#[doc = "Load multiple 2-element structures to two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(ld2)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t {
+    transmute(vld2q_s16(transmute(a)))
+}
+#[doc = "Load single 3-element structure and replicate to all lanes of two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg(target_arch = "arm")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld4_lane_f16<const LANE: i32>(a: *const f16, b: float16x4x4_t) -> float16x4x4_t {
-    static_assert_uimm_bits!(LANE, 2);
+pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f16.p0")]
-        fn _vld4_lane_f16(
-            ptr: *const f16,
-            a: float16x4_t,
-            b: float16x4_t,
-            c: float16x4_t,
-            d: float16x4_t,
-            n: i32,
-            size: i32,
-        ) -> float16x4x4_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f16.p0")]
+        fn _vld3_dup_f16(ptr: *const f16, size: i32) -> float16x4x3_t;
     }
-    _vld4_lane_f16(a as _, b.0, b.1, b.2, b.3, LANE, 2)
+    _vld3_dup_f16(a as _, 2)
 }
-#[doc = "Load multiple 4-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f16)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg(target_arch = "arm")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld4q_lane_f16<const LANE: i32>(a: *const f16, b: float16x8x4_t) -> float16x8x4_t {
-    static_assert_uimm_bits!(LANE, 3);
+pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8f16.p0")]
-        fn _vld4q_lane_f16(
-            ptr: *const f16,
-            a: float16x8_t,
-            b: float16x8_t,
-            c: float16x8_t,
-            d: float16x8_t,
-            n: i32,
-            size: i32,
-        ) -> float16x8x4_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8f16.p0")]
+        fn _vld3q_dup_f16(ptr: *const f16, size: i32) -> float16x8x3_t;
     }
-    _vld4q_lane_f16(a as _, b.0, b.1, b.2, b.3, LANE, 2)
+    _vld3q_dup_f16(a as _, 2)
 }
-#[doc = "Load multiple 4-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f16)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
+#[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld4, LANE = 0)
+    assert_instr(ld3r)
 )]
-#[rustc_legacy_const_generics(2)]
-#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld4_lane_f16<const LANE: i32>(a: *const f16, b: float16x4x4_t) -> float16x4x4_t {
-    static_assert_uimm_bits!(LANE, 2);
+pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld4lane.v4f16.p0"
+            link_name = "llvm.aarch64.neon.ld3r.v4f16.p0"
         )]
-        fn _vld4_lane_f16(
-            a: float16x4_t,
-            b: float16x4_t,
-            c: float16x4_t,
-            d: float16x4_t,
-            n: i64,
-            ptr: *const f16,
-        ) -> float16x4x4_t;
+        fn _vld3_dup_f16(ptr: *const f16) -> float16x4x3_t;
     }
-    _vld4_lane_f16(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+    _vld3_dup_f16(a as _)
 }
-#[doc = "Load multiple 4-element structures to two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f16)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
+#[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld4, LANE = 0)
+    assert_instr(ld3r)
 )]
-#[rustc_legacy_const_generics(2)]
-#[target_feature(enable = "neon,fp16")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-pub unsafe fn vld4q_lane_f16<const LANE: i32>(a: *const f16, b: float16x8x4_t) -> float16x8x4_t {
-    static_assert_uimm_bits!(LANE, 3);
+pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld4lane.v8f16.p0"
+            link_name = "llvm.aarch64.neon.ld3r.v8f16.p0"
         )]
-        fn _vld4q_lane_f16(
-            a: float16x8_t,
-            b: float16x8_t,
-            c: float16x8_t,
-            d: float16x8_t,
-            n: i64,
-            ptr: *const f16,
-        ) -> float16x8x4_t;
+        fn _vld3q_dup_f16(ptr: *const f16) -> float16x8x3_t;
     }
-    _vld4q_lane_f16(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+    _vld3q_dup_f16(a as _)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld4_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x4_t) -> float32x2x4_t {
-    static_assert_uimm_bits!(LANE, 1);
+#[cfg_attr(test, assert_instr(ld3r))]
+pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0"
+            link_name = "llvm.aarch64.neon.ld3r.v2f32.p0"
         )]
-        fn _vld4_lane_f32(
-            a: float32x2_t,
-            b: float32x2_t,
-            c: float32x2_t,
-            d: float32x2_t,
-            n: i64,
-            ptr: *const i8,
-        ) -> float32x2x4_t;
+        fn _vld3_dup_f32(ptr: *const f32) -> float32x2x3_t;
     }
-    _vld4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+    _vld3_dup_f32(a as _)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld4q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x4_t) -> float32x4x4_t {
-    static_assert_uimm_bits!(LANE, 2);
+#[cfg_attr(test, assert_instr(ld3r))]
+pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0"
+            link_name = "llvm.aarch64.neon.ld3r.v4f32.p0"
        )]
-        fn _vld4q_lane_f32(
-            a: float32x4_t,
-            b: float32x4_t,
-            c: float32x4_t,
-            d: float32x4_t,
-            n: i64,
-            ptr: *const i8,
-        ) -> float32x4x4_t;
+        fn _vld3q_dup_f32(ptr: *const f32) -> float32x4x3_t;
     }
-    _vld4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+    _vld3q_dup_f32(a as _)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld4_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x4_t) -> int8x8x4_t {
-    static_assert_uimm_bits!(LANE, 3);
+#[cfg_attr(test, assert_instr(ld3r))]
+pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0"
+            link_name = "llvm.aarch64.neon.ld3r.v8i8.p0"
         )]
-        fn _vld4_lane_s8(
-            a: int8x8_t,
-            b: int8x8_t,
-            c: int8x8_t,
-            d: int8x8_t,
-            n: i64,
-            ptr: *const i8,
-        ) -> int8x8x4_t;
+        fn _vld3_dup_s8(ptr: *const i8) -> int8x8x3_t;
     }
-    _vld4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+    _vld3_dup_s8(a as _)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld4_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x4_t) -> int16x4x4_t {
-    static_assert_uimm_bits!(LANE, 2);
+#[cfg_attr(test, assert_instr(ld3r))]
+pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0"
+            link_name = "llvm.aarch64.neon.ld3r.v16i8.p0"
         )]
-        fn _vld4_lane_s16(
-            a: int16x4_t,
-            b: int16x4_t,
-            c: int16x4_t,
-            d: int16x4_t,
-            n: i64,
-            ptr: *const i8,
-        ) -> int16x4x4_t;
+        fn _vld3q_dup_s8(ptr: *const i8) -> int8x16x3_t;
     }
-    _vld4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+    _vld3q_dup_s8(a as _)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld4q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x4_t) -> int16x8x4_t {
-    static_assert_uimm_bits!(LANE, 3);
+#[cfg_attr(test, assert_instr(ld3r))]
+pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0"
+            link_name = "llvm.aarch64.neon.ld3r.v4i16.p0"
         )]
-        fn _vld4q_lane_s16(
-            a: int16x8_t,
-            b: int16x8_t,
-            c: int16x8_t,
-            d: int16x8_t,
-            n: i64,
-            ptr: *const i8,
-        ) -> int16x8x4_t;
+        fn _vld3_dup_s16(ptr: *const i16) -> int16x4x3_t;
     }
-    _vld4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+    _vld3_dup_s16(a as _)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld4_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x4_t) -> int32x2x4_t {
-    static_assert_uimm_bits!(LANE, 1);
+#[cfg_attr(test, assert_instr(ld3r))]
+pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0"
+            link_name = "llvm.aarch64.neon.ld3r.v8i16.p0"
         )]
-        fn _vld4_lane_s32(
-            a: int32x2_t,
-            b: int32x2_t,
-            c: int32x2_t,
-            d: int32x2_t,
-            n: i64,
-            ptr: *const i8,
-        ) -> int32x2x4_t;
+        fn _vld3q_dup_s16(ptr: *const i16) -> int16x8x3_t;
     }
-    _vld4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+    _vld3q_dup_s16(a as _)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vld4q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x4_t) -> int32x4x4_t {
-    static_assert_uimm_bits!(LANE, 2);
+#[cfg_attr(test, assert_instr(ld3r))]
+pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0"
+            link_name = "llvm.aarch64.neon.ld3r.v2i32.p0"
         )]
-        fn _vld4q_lane_s32(
-            a: int32x4_t,
-            b: int32x4_t,
-            c: int32x4_t,
-            d: int32x4_t,
-            n: i64,
-            ptr: *const i8,
-        ) -> int32x4x4_t;
+        fn _vld3_dup_s32(ptr: *const i32) -> int32x2x3_t;
     }
-    _vld4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+    _vld3_dup_s32(a as _)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld4_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x4_t) -> float32x2x4_t {
-    static_assert_uimm_bits!(LANE, 1);
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(ld3r))]
+pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0")]
-        fn _vld4_lane_f32(
-            ptr: *const i8,
-            a: float32x2_t,
-            b: float32x2_t,
-            c: float32x2_t,
-            d: float32x2_t,
-            n: i32,
-            size: i32,
-        ) -> float32x2x4_t;
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.ld3r.v4i32.p0"
+        )]
+        fn _vld3q_dup_s32(ptr: *const i32) -> int32x4x3_t;
     }
-    _vld4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4)
+    _vld3q_dup_s32(a as _)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(ld3r))]
+pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.ld3r.v1i64.p0"
+        )]
+        fn _vld3_dup_s64(ptr: *const i64) -> int64x1x3_t;
+    }
+    _vld3_dup_s64(a as _)
+}
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld4q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x4_t) -> float32x4x4_t {
-    static_assert_uimm_bits!(LANE, 2);
+#[cfg_attr(test, assert_instr(vld3))]
+pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0")]
-        fn _vld4q_lane_f32(
-            ptr: *const i8,
-            a: float32x4_t,
-            b: float32x4_t,
-            c: float32x4_t,
-            d: float32x4_t,
-            n: i32,
-            size: i32,
-        ) -> float32x4x4_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0")]
+        fn _vld3_dup_f32(ptr: *const i8, size: i32) -> float32x2x3_t;
     }
-    _vld4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4)
+    _vld3_dup_f32(a as *const i8, 4)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld4_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x4_t) -> int8x8x4_t {
-    static_assert_uimm_bits!(LANE, 3);
+#[cfg_attr(test, assert_instr(vld3))]
+pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0")]
-        fn _vld4_lane_s8(
-            ptr: *const i8,
-            a: int8x8_t,
-            b: int8x8_t,
-            c: int8x8_t,
-            d: int8x8_t,
-            n: i32,
-            size: i32,
-        ) -> int8x8x4_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0")]
+        fn _vld3q_dup_f32(ptr: *const i8, size: i32) -> float32x4x3_t;
     }
-    _vld4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1)
+    _vld3q_dup_f32(a as *const i8, 4)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld4_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x4_t) -> int16x4x4_t {
-    static_assert_uimm_bits!(LANE, 2);
+#[cfg_attr(test, assert_instr(vld3))]
+pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0")]
-        fn _vld4_lane_s16(
-            ptr: *const i8,
-            a: int16x4_t,
-            b: int16x4_t,
-            c: int16x4_t,
-            d: int16x4_t,
-            n: i32,
-            size: i32,
-        ) -> int16x4x4_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0")]
+        fn _vld3_dup_s8(ptr: *const i8, size: i32) -> int8x8x3_t;
     }
-    _vld4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2)
+    _vld3_dup_s8(a as *const i8, 1)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld4q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x4_t) -> int16x8x4_t {
-    static_assert_uimm_bits!(LANE, 3);
+#[cfg_attr(test, assert_instr(vld3))]
+pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0")]
-        fn _vld4q_lane_s16(
-            ptr: *const i8,
-            a: int16x8_t,
-            b: int16x8_t,
-            c: int16x8_t,
-            d: int16x8_t,
-            n: i32,
-            size: i32,
-        ) -> int16x8x4_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0")]
+        fn _vld3q_dup_s8(ptr: *const i8, size: i32) -> int8x16x3_t;
     }
-    _vld4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2)
+    _vld3q_dup_s8(a as *const i8, 1)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld4_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x4_t) -> int32x2x4_t {
-    static_assert_uimm_bits!(LANE, 1);
+#[cfg_attr(test, assert_instr(vld3))]
+pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0")]
-        fn _vld4_lane_s32(
-            ptr: *const i8,
-            a: int32x2_t,
-            b: int32x2_t,
-            c: int32x2_t,
-            d: int32x2_t,
-            n: i32,
-            size: i32,
-        ) -> int32x2x4_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0")]
+        fn _vld3_dup_s16(ptr: *const i8, size: i32) -> int16x4x3_t;
     }
-    _vld4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4)
+    _vld3_dup_s16(a as *const i8, 2)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
-#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
-#[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vld4q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x4_t) -> int32x4x4_t {
-    static_assert_uimm_bits!(LANE, 2);
+#[cfg_attr(test, assert_instr(vld3))]
+pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0")]
-        fn _vld4q_lane_s32(
-            ptr: *const i8,
-            a: int32x4_t,
-            b: int32x4_t,
-            c: int32x4_t,
-            d: int32x4_t,
-            n: i32,
-            size: i32,
-        ) -> int32x4x4_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0")]
+        fn _vld3q_dup_s16(ptr: *const i8, size: i32) -> int16x8x3_t;
     }
-    _vld4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4)
+    _vld3q_dup_s16(a as *const i8, 2)
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
+#[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vld3))]
+pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0")]
+        fn _vld3_dup_s32(ptr: *const i8, size: i32) -> int32x2x3_t;
+    }
+    _vld3_dup_s32(a as *const i8, 4)
+}
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vld3))]
+pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0")]
+        fn _vld3q_dup_s32(ptr: *const i8, size: i32) -> int32x4x3_t;
+    }
+    _vld3q_dup_s32(a as *const i8, 4)
+}
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p64)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld4, LANE = 0)
+    assert_instr(ld3r)
 )]
-#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -23262,23 +23959,37 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) ->
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld4_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t {
-    static_assert_uimm_bits!(LANE, 3);
-    transmute(vld4_lane_s8::<LANE>(transmute(a), transmute(b)))
+pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t {
+    transmute(vld3_dup_s64(transmute(a)))
 }
-#[doc = "Load multiple 4-element structures to four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"]
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(nop))]
+pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v1i64.p0")]
+        fn _vld3_dup_s64(ptr: *const i8, size: i32) -> int64x1x3_t;
+    }
+    _vld3_dup_s64(a as *const i8, 8)
+}
+#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u64)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ld4, LANE = 0)
+    assert_instr(ld3r)
 )]
-#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -23287,23 +23998,21 @@ pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uin
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vld4_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t {
-    static_assert_uimm_bits!(LANE, 2);
-    transmute(vld4_lane_s16::<LANE>(transmute(a), transmute(b)))
+pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t {
+ transmute(vld3_dup_s64(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23312,23 +24021,21 @@ pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { + transmute(vld3_dup_s8(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23337,23 +24044,21 @@ pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { - static_assert_uimm_bits!(LANE, 1); - transmute(vld4_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { + transmute(vld3q_dup_s8(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] 
#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23362,23 +24067,21 @@ pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4q_lane_s32::(transmute(a), transmute(b))) +pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { + transmute(vld3_dup_s16(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23387,23 +24090,21 @@ pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4_lane_s8::(transmute(a), transmute(b))) +pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { + transmute(vld3q_dup_s16(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23412,23 +24113,21 @@ pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> pol target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { - static_assert_uimm_bits!(LANE, 2); - transmute(vld4_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { + transmute(vld3_dup_s32(transmute(a))) } -#[doc = "Load 
multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4, LANE = 0) + assert_instr(ld3r) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23437,21 +24136,20 @@ pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { - static_assert_uimm_bits!(LANE, 3); - transmute(vld4q_lane_s16::(transmute(a), transmute(b))) +pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { + transmute(vld3q_dup_s32(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -23461,48 +24159,20 @@ pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { - transmute(vld4_s64(transmute(a))) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - crate::ptr::read_unaligned(a.cast()) -} -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, 
assert_instr(nop))] -pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0")] - fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; - } - _vld4_s64(a as *const i8, 8) +pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { + transmute(vld3_dup_s8(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -23512,20 +24182,20 @@ pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { - transmute(vld4_s64(transmute(a))) +pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { + transmute(vld3q_dup_s8(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -23535,20 +24205,20 @@ pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { - transmute(vld4_s8(transmute(a))) +pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { + transmute(vld3_dup_s16(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] +#[doc = "Load single 3-element structure and replicate to all lanes of three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(ld4) + assert_instr(ld3r) )] #[cfg_attr( not(target_arch = "arm"), @@ -23558,392 +24228,775 @@ pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { - transmute(vld4q_s8(transmute(a))) +pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { + transmute(vld3q_dup_s16(transmute(a))) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { - transmute(vld4_s16(transmute(a))) +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld3_f16(a: *const f16) -> float16x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f16.p0")] + fn _vld3_f16(ptr: *const f16, size: i32) -> float16x4x3_t; + } + _vld3_f16(a as _, 2) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] +#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { - transmute(vld4q_s16(transmute(a))) +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld3q_f16(a: *const f16) -> float16x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8f16.p0")] + fn _vld3q_f16(ptr: *const f16, size: i32) -> float16x8x3_t; + } + _vld3q_f16(a as _, 2) } -#[doc = "Load multiple 
4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ld3) )] -pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { - transmute(vld4_s32(transmute(a))) +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld3_f16(a: *const f16) -> float16x4x3_t { + crate::core_arch::macros::deinterleaving_load!(f16, 4, 3, a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] +#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ld3) )] -pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { - transmute(vld4q_s32(transmute(a))) +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld3q_f16(a: *const f16) -> float16x8x3_t { + crate::core_arch::macros::deinterleaving_load!(f16, 8, 3, a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { - transmute(vld4_s8(transmute(a))) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { + crate::core_arch::macros::deinterleaving_load!(f32, 2, 3, a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { - transmute(vld4q_s8(transmute(a))) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { + crate::core_arch::macros::deinterleaving_load!(f32, 4, 3, a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { - transmute(vld4_s16(transmute(a))) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { + crate::core_arch::macros::deinterleaving_load!(i8, 8, 3, a) } -#[doc = "Load multiple 4-element structures to four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), - assert_instr(ld4) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { - transmute(vld4q_s16(transmute(a))) +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { + crate::core_arch::macros::deinterleaving_load!(i8, 16, 3, a) } -#[doc = "Store SIMD&FP register (immediate offset)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldrq_p128)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vldrq_p128(a: *const p128) -> p128 { - *a +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { + crate::core_arch::macros::deinterleaving_load!(i16, 4, 3, a) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) -)] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { + crate::core_arch::macros::deinterleaving_load!(i16, 8, 3, a) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { + crate::core_arch::macros::deinterleaving_load!(i32, 2, 3, a) +} +#[doc = "Load multiple 3-element structures to three 
registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3))] +pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { + crate::core_arch::macros::deinterleaving_load!(i32, 4, 3, a) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0")] + fn _vld3_f32(ptr: *const i8, size: i32) -> float32x2x3_t; + } + _vld3_f32(a as *const i8, 4) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0")] + fn _vld3q_f32(ptr: *const i8, size: i32) -> float32x4x3_t; + } + _vld3q_f32(a as *const i8, 4) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0")] + fn _vld3_s8(ptr: *const i8, size: i32) -> int8x8x3_t; + } + _vld3_s8(a as *const i8, 1) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0")] + fn _vld3q_s8(ptr: *const i8, size: i32) -> int8x16x3_t; + } + _vld3q_s8(a as *const i8, 1) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] 
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0")] + fn _vld3_s16(ptr: *const i8, size: i32) -> int16x4x3_t; + } + _vld3_s16(a as *const i8, 2) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0")] + fn _vld3q_s16(ptr: *const i8, size: i32) -> int16x8x3_t; + } + _vld3q_s16(a as *const i8, 2) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0")] + fn _vld3_s32(ptr: *const i8, size: i32) -> int32x2x3_t; + } + _vld3_s32(a as *const i8, 4) +} +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vld3))] +pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0")] + fn _vld3q_s32(ptr: *const i8, size: i32) -> int32x4x3_t; + } + _vld3q_s32(a as *const i8, 4) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] -pub fn vmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { +pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> float16x4x3_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v4f16" - )] - fn _vmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + #[cfg_attr(target_arch = "arm", link_name 
= "llvm.arm.neon.vld3lane.v4f16.p0")] + fn _vld3_lane_f16( + ptr: *const f16, + a: float16x4_t, + b: float16x4_t, + c: float16x4_t, + n: i32, + size: i32, + ) -> float16x4x3_t; } - unsafe { _vmax_f16(a, b) } + _vld3_lane_f16(a as _, b.0, b.1, b.2, LANE, 2) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f16)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) -> float16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8f16.p0")] + fn _vld3q_lane_f16( + ptr: *const f16, + a: float16x8_t, + b: float16x8_t, + c: float16x8_t, + n: i32, + size: i32, + ) -> float16x8x3_t; + } + _vld3q_lane_f16(a as _, b.0, b.1, b.2, LANE, 2) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) -)] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] -pub fn vmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { +pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> float16x4x3_t { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v8f16" + link_name = "llvm.aarch64.neon.ld3lane.v4f16.p0" )] - fn _vmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + fn _vld3_lane_f16( + a: float16x4_t, + b: float16x4_t, + c: float16x4_t, + n: i64, + ptr: *const f16, + ) -> float16x4x3_t; } - unsafe { _vmaxq_f16(a, b) } + _vld3_lane_f16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * 
Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ld3, LANE = 0) )] -pub fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { +#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) -> float16x8x3_t { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v2f32" + link_name = "llvm.aarch64.neon.ld3lane.v8f16.p0" )] - fn _vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + fn _vld3q_lane_f16( + a: float16x8_t, + b: float16x8_t, + c: float16x8_t, + n: i64, + ptr: *const f16, + ) -> float16x8x3_t; } - unsafe { _vmax_f32(a, b) } + _vld3q_lane_f16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmax.v4f32" + link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0" )] - fn _vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + fn _vld3_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i64, + ptr: *const i8, + ) -> float32x2x3_t; } - unsafe { _vmaxq_f32(a, b) } + _vld3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { - let mask: int8x8_t = simd_ge(a, b); - simd_select(mask, a, b) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0" + )] + fn _vld3q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i64, + ptr: *const i8, + ) -> float32x4x3_t; } + _vld3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0")] + fn _vld3_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x3_t; + } + _vld3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) +} +#[doc = "Load multiple 3-element structures to two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe { - let mask: int8x16_t = simd_ge(a, b); - simd_select(mask, a, b) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s8(a: *const i8, b: 
int8x8x3_t) -> int8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0" + )] + fn _vld3_lane_s8( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + n: i64, + ptr: *const i8, + ) -> int8x8x3_t; } + _vld3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) }
-#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0" + )] + fn _vld3_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i64, + ptr: *const i8, + ) -> int16x4x3_t; + } + _vld3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +}
+#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0" + )] + fn _vld3q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i64, + ptr: *const i8, + ) -> int16x8x3_t; + } + _vld3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) +}
+#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0" + )] + fn _vld3_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i64, + ptr: *const i8, + ) -> int32x2x3_t; + } + _vld3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +}
+#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld3q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0" + )] + fn _vld3q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i64, + ptr: *const i8, + ) -> int32x4x3_t; + } + _vld3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +}
+#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0")] + fn _vld3_lane_s8( + ptr: *const i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + n: i32, + size: i32, + ) -> int8x8x3_t; + } + _vld3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) +}
+#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0")] + fn _vld3_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x3_t; + } + _vld3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +}
+#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0")] + fn _vld3q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x3_t; + } + _vld3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) +}
+#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0")] + fn _vld3_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x3_t; + } + _vld3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +}
+#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0")] + fn _vld3q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x3_t; + } + _vld3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) +}
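// ---------------------------------------------------------------------------
// Editor's illustration (not part of the patch): a minimal caller for the
// lane-load intrinsics added above, written against the stable
// std::arch::aarch64 API; the demo function name is hypothetical. `LANE` is a
// const generic, so `static_assert_uimm_bits!` rejects an out-of-range lane
// index at compile time rather than at run time.
#[cfg(target_arch = "aarch64")]
fn demo_vld3_lane_s16() {
    use core::arch::aarch64::{int16x4x3_t, vdup_n_s16, vget_lane_s16, vld3_lane_s16};
    let src: [i16; 3] = [10, 20, 30];
    unsafe {
        // Start from three vectors of zeros; vld3_lane_s16 overwrites lane 2
        // of each vector with the three consecutive values at `src`,
        // de-interleaved across the three results.
        let acc = int16x4x3_t(vdup_n_s16(0), vdup_n_s16(0), vdup_n_s16(0));
        let out = vld3_lane_s16::<2>(src.as_ptr(), acc);
        assert_eq!(vget_lane_s16::<2>(out.0), 10);
        assert_eq!(vget_lane_s16::<2>(out.2), 30);
    }
}
// ---------------------------------------------------------------------------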
+#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23952,22 +25005,23 @@ pub fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { - let mask: int16x4_t = simd_ge(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3_lane_s8::<LANE>(transmute(a), transmute(b))) }
-#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -23976,22 +25030,23 @@ pub fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { - let mask: int16x8_t = simd_ge(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld3_lane_s16::<LANE>(transmute(a), transmute(b))) }
-#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24000,22 +25055,23 @@ pub fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { - let mask: int32x2_t = simd_ge(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3q_lane_s16::<LANE>(transmute(a), transmute(b))) }
-#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smax) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24024,22 +25080,23 @@ pub fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { - let mask: int32x4_t = simd_ge(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { + static_assert_uimm_bits!(LANE, 1); + transmute(vld3_lane_s32::<LANE>(transmute(a), transmute(b))) }
-#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24048,22 +25105,23 @@ pub fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { - let mask: uint8x8_t = simd_ge(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld3q_lane_s32::<LANE>(transmute(a), transmute(b))) }
-#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24072,22 +25130,23 @@ pub fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { - let mask: uint8x16_t = simd_ge(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3_lane_s8::<LANE>(transmute(a), transmute(b))) }
-#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24096,22 +25155,23 @@ pub fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { - let mask: uint16x4_t = simd_ge(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld3_lane_s16::<LANE>(transmute(a), transmute(b))) }
-#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(ld3, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24120,21 +25180,21 @@ pub fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { - let mask: uint16x8_t = simd_ge(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld3q_lane_s16::<LANE>(transmute(a), transmute(b))) }
-#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -24144,105 +25204,48 @@ pub fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { - let mask: uint32x2_t = simd_ge(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { + transmute(vld3_s64(transmute(a))) }
-#[doc = "Maximum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline]
#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umax) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { - let mask: uint32x4_t = simd_ge(a, b); - simd_select(mask, a, b) - } -} -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) -)] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v4f16" - )] - fn _vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; - } - unsafe { _vmaxnm_f16(a, b) } +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { + crate::ptr::read_unaligned(a.cast()) } -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) -)] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v8f16" - )] - fn _vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + #[cfg_attr(target_arch = 
"arm", link_name = "llvm.arm.neon.vld3.v1i64.p0")] + fn _vld3_s64(ptr: *const i8, size: i32) -> int64x1x3_t; } - unsafe { _vmaxnmq_f16(a, b) } + _vld3_s64(a as *const i8, 8) } -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -24252,26 +25255,20 @@ pub fn vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v2f32" - )] - fn _vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - unsafe { _vmaxnm_f32(a, b) } +pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { + transmute(vld3_s64(transmute(a))) } -#[doc = "Floating-point Maximum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxnm) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -24281,86 +25278,20 @@ pub fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxnm.v4f32" - )] - fn _vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - unsafe { _vmaxnmq_f32(a, b) } -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), - assert_instr(fmin) -)] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v4f16" - )] - fn _vmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; - } - unsafe { _vmin_f16(a, b) } -} -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) -)] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v8f16" - )] - fn _vminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; - } - unsafe { _vminq_f16(a, b) } +pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { + transmute(vld3_s8(transmute(a))) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -24370,26 +25301,20 @@ pub fn vminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v2f32" - )] - fn _vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - unsafe { _vmin_f32(a, b) } +pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { + transmute(vld3q_s8(transmute(a))) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmin) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -24399,26 +25324,20 @@ pub fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmin.v4f32" - )] - fn _vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - } - unsafe { _vminq_f32(a, b) } +pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { + transmute(vld3_s16(transmute(a))) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -24428,21 +25347,20 @@ pub fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { - let mask: int8x8_t = simd_le(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { + transmute(vld3q_s16(transmute(a))) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -24452,21 +25370,20 @@ pub fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe { - let mask: int8x16_t = simd_le(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3_u32(a: *const u32) 
-> uint32x2x3_t { + transmute(vld3_s32(transmute(a))) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -24476,21 +25393,20 @@ pub fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { - let mask: int16x4_t = simd_le(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { + transmute(vld3q_s32(transmute(a))) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -24500,21 +25416,20 @@ pub fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { - let mask: int16x8_t = simd_le(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { + transmute(vld3_s8(transmute(a))) } -#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -24524,21 +25439,20 @@ pub fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { - let mask: int32x2_t = simd_le(a, b); - simd_select(mask, a, b) - } +pub unsafe fn 
vld3q_p8(a: *const p8) -> poly8x16x3_t { + transmute(vld3q_s8(transmute(a))) }
-#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smin) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -24548,21 +25462,20 @@ pub fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { - let mask: int32x4_t = simd_le(a, b); - simd_select(mask, a, b) - } +pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { + transmute(vld3_s16(transmute(a))) }
-#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] +#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(ld3) )] #[cfg_attr( not(target_arch = "arm"), @@ -24572,230 +25485,426 @@ pub fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { - let mask: uint8x8_t = simd_le(a, b); - simd_select(mask, a, b) +pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { + transmute(vld3q_s16(transmute(a))) +}
+#[doc = "Load multiple 3-element structures to three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld3, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld3q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0")] + fn _vld3q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x3_t; } + _vld3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) }
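// ---------------------------------------------------------------------------
// Editor's note (illustration only, not part of the patch): the unsigned and
// polynomial `vld3_lane_*` wrappers above forward to the signed
// implementations via `transmute`, so the const lane index flows through
// unchanged. A minimal sketch against std::arch::aarch64 (the demo function
// name is hypothetical):
#[cfg(target_arch = "aarch64")]
unsafe fn demo_vld3q_lane_f32(
    ptr: *const f32,
    acc: core::arch::aarch64::float32x4x3_t,
) -> core::arch::aarch64::float32x4x3_t {
    // Replace lane 1 of each accumulator vector with the three consecutive
    // f32 values at `ptr`; all other lanes pass through untouched.
    core::arch::aarch64::vld3q_lane_f32::<1>(ptr, acc)
}
// ---------------------------------------------------------------------------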
-#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { - let mask: uint8x16_t = simd_le(a, b); - simd_select(mask, a, b) +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f16.p0")] + fn _vld4_dup_f16(ptr: *const f16, size: i32) -> float16x4x4_t; } + _vld4_dup_f16(a as _, 2) }
-#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { - let mask: uint16x4_t = simd_le(a, b); - simd_select(mask, a, b) +#[cfg(target_arch = "arm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8f16.p0")] + fn _vld4q_dup_f16(ptr: *const f16, size: i32) -> float16x8x4_t; } + _vld4q_dup_f16(a as _, 2) }
-#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(ld4r) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4f16.p0" + )] + fn _vld4_dup_f16(ptr: *const f16) -> float16x4x4_t; } + _vld4_dup_f16(a as _) }
-#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] +#[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) + assert_instr(ld4r) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v8f16.p0" + )] + fn _vld4q_dup_f16(ptr: *const f16) -> float16x8x4_t; + } + _vld4q_dup_f16(a as _) }
-#[doc = "Minimum (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"]
+#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0")] + fn _vld4q_dup_f32(ptr: *const i8, size: i32) -> float32x4x4_t; + } + _vld4q_dup_f32(a as *const i8, 4) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0")] + fn _vld4_dup_s8(ptr: *const i8, size: i32) -> int8x8x4_t; + } + _vld4_dup_s8(a as *const i8, 1) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0")] + fn _vld4q_dup_s8(ptr: *const i8, size: i32) -> int8x16x4_t; + } + _vld4q_dup_s8(a as *const i8, 1) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0")] + fn _vld4_dup_s16(ptr: *const i8, size: i32) -> int16x4x4_t; + } + _vld4_dup_s16(a as *const i8, 2) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0")] + fn _vld4q_dup_s16(ptr: *const i8, size: i32) -> int16x8x4_t; + } + _vld4q_dup_s16(a as *const i8, 2) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0")] + fn _vld4_dup_s32(ptr: *const i8, size: i32) -> int32x2x4_t; + } + _vld4_dup_s32(a as *const i8, 4) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0")] + fn _vld4q_dup_s32(ptr: *const i8, size: i32) -> int32x4x4_t; + } + _vld4q_dup_s32(a as *const i8, 4) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umin) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { - let mask: uint32x4_t = simd_le(a, b); - simd_select(mask, a, b) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2f32.p0.p0" + )] + fn _vld4_dup_f32(ptr: *const f32) -> float32x2x4_t; } + _vld4_dup_f32(a as _) } -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) -)] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = 
"arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v4f16" + link_name = "llvm.aarch64.neon.ld4r.v4f32.p0.p0" )] - fn _vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + fn _vld4q_dup_f32(ptr: *const f32) -> float32x4x4_t; } - unsafe { _vminnm_f16(a, b) } + _vld4q_dup_f32(a as _) } -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) -)] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v8f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v8f16" + link_name = "llvm.aarch64.neon.ld4r.v8i8.p0.p0" )] - fn _vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + fn _vld4_dup_s8(ptr: *const i8) -> int8x8x4_t; } - unsafe { _vminnmq_f16(a, b) } + _vld4_dup_s8(a as _) } -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vminnm_f32(a: 
float32x2_t, b: float32x2_t) -> float32x2_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v2f32" + link_name = "llvm.aarch64.neon.ld4r.v16i8.p0.p0" )] - fn _vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + fn _vld4q_dup_s8(ptr: *const i8) -> int8x16x4_t; } - unsafe { _vminnm_f32(a, b) } + _vld4q_dup_s8(a as _) } -#[doc = "Floating-point Minimum Number (vector)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4i16.p0.p0" + )] + fn _vld4_dup_s16(ptr: *const i16) -> int16x4x4_t; + } + _vld4_dup_s16(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v8i16.p0.p0" + )] + fn _vld4q_dup_s16(ptr: *const i16) -> int16x8x4_t; + } + _vld4q_dup_s16(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v2i32.p0.p0" + )] + fn _vld4_dup_s32(ptr: *const i32) -> int32x2x4_t; + } + _vld4_dup_s32(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] 
+#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v4i32.p0.p0" + )] + fn _vld4q_dup_s32(ptr: *const i32) -> int32x4x4_t; + } + _vld4q_dup_s32(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4r))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4r.v1i64.p0.p0" + )] + fn _vld4_dup_s64(ptr: *const i64) -> int64x1x4_t; + } + _vld4_dup_s64(a as _) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminnm) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24805,26 +25914,36 @@ pub fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { +pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { + transmute(vld4_dup_s64(transmute(a))) +} +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(nop))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fminnm.v4f32" - )] - fn _vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0")] + fn _vld4_dup_s64(ptr: *const i8, size: i32) -> int64x1x4_t; } - unsafe { _vminnmq_f32(a, b) } + _vld4_dup_s64(a as *const i8, 8) } -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u64)"] +#[doc = "## 
Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24834,18 +25953,20 @@ pub fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { + transmute(vld4_dup_s64(transmute(a))) } -#[doc = "Floating-point multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(ld4r) )] #[cfg_attr( not(target_arch = "arm"), @@ -24855,20 +25976,21 @@ pub fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { + transmute(vld4_dup_s8(transmute(a))) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(ld4r) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24877,25 +25999,21 @@ pub fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmla_f32(a, b, vdup_lane_f32::(c)) +pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { + transmute(vld4q_dup_s8(transmute(a))) } -#[doc = 
"Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(ld4r) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24904,25 +26022,21 @@ pub fn vmla_lane_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmla_f32(a, b, vdup_laneq_f32::(c)) +pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { + transmute(vld4_dup_s16(transmute(a))) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(ld4r) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24931,25 +26045,21 @@ pub fn vmla_laneq_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlaq_f32(a, b, vdupq_lane_f32::(c)) +pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { + transmute(vld4q_dup_s16(transmute(a))) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(ld4r) )] 
-#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24958,25 +26068,21 @@ pub fn vmlaq_lane_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlaq_f32(a, b, vdupq_laneq_f32::(c)) +pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { + transmute(vld4_dup_s32(transmute(a))) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ld4r) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -24985,21 +26091,21 @@ pub fn vmlaq_laneq_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - vmla_s16(a, b, vdup_lane_s16::(c)) +pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { + transmute(vld4q_dup_s32(transmute(a))) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla, LANE = 1) + assert_instr(ld4r) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25008,21 +26114,21 @@ pub fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - vmla_u16(a, b, vdup_lane_u16::(c)) +pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { + transmute(vld4_dup_s8(transmute(a))) } -#[doc = "Vector multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] +#[doc = "Load single 4-element structure and replicate to all lanes of four registers"] +#[doc = "[Arm's 
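The unsigned and polynomial `_dup` variants above are thin `transmute` shims over the signed loads, so the two sides are bit-identical; only the element interpretation changes, not the load. A quick check of that property (hypothetical helper, aarch64):

    use core::arch::aarch64::*;

    #[target_feature(enable = "neon")]
    unsafe fn transmute_shim_demo(p: *const u8) {
        let u = vld4q_dup_u8(p);
        let s = vld4q_dup_s8(p as *const i8);
        // Same bits, different element type.
        assert_eq!(vgetq_lane_u8::<0>(u.0), vgetq_lane_s8::<0>(s.0) as u8);
    }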
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"]
+#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
+    assert_instr(ld4r)
 )]
-#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -25031,21 +26137,21 @@ pub fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmla_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
-    static_assert_uimm_bits!(LANE, 3);
-    vmla_s16(a, b, vdup_laneq_s16::<LANE>(c))
+pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t {
+    transmute(vld4q_dup_s8(transmute(a)))
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"]
+#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
+    assert_instr(ld4r)
 )]
-#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -25054,21 +26160,21 @@ pub fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t)
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmla_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t {
-    static_assert_uimm_bits!(LANE, 3);
-    vmla_u16(a, b, vdup_laneq_u16::<LANE>(c))
+pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t {
+    transmute(vld4_dup_s16(transmute(a)))
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"]
+#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
+    assert_instr(ld4r)
 )]
-#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -25077,609 +26183,812 @@ pub fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmlaq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
-    static_assert_uimm_bits!(LANE, 2);
-    vmlaq_s16(a, b, vdupq_lane_s16::<LANE>(c))
+pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t {
+    transmute(vld4q_dup_s16(transmute(a)))
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
-)]
-#[rustc_legacy_const_generics(3)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmlaq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t {
-    static_assert_uimm_bits!(LANE, 2);
-    vmlaq_u16(a, b, vdupq_lane_u16::<LANE>(c))
+#[cfg(target_arch = "arm")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f16.p0")]
+        fn _vld4_f16(ptr: *const f16, size: i32) -> float16x4x4_t;
+    }
+    _vld4_f16(a as _, 2)
}
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
-)]
-#[rustc_legacy_const_generics(3)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmlaq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
-    static_assert_uimm_bits!(LANE, 3);
-    vmlaq_s16(a, b, vdupq_laneq_s16::<LANE>(c))
+#[cfg(target_arch = "arm")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8f16.p0")]
+        fn _vld4q_f16(ptr: *const f16, size: i32) -> float16x8x4_t;
+    }
+    _vld4q_f16(a as _, 2)
}
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
+#[cfg(not(target_arch = "arm"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
-)]
-#[rustc_legacy_const_generics(3)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+    assert_instr(ld4)
 )]
-pub fn vmlaq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
-    static_assert_uimm_bits!(LANE, 3);
-    vmlaq_u16(a, b, vdupq_laneq_u16::<LANE>(c))
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t {
+    crate::core_arch::macros::deinterleaving_load!(f16, 4, 4, a)
}
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
+#[cfg(not(target_arch = "arm"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
-)]
-#[rustc_legacy_const_generics(3)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+    assert_instr(ld4)
 )]
-pub fn vmla_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
-    static_assert_uimm_bits!(LANE, 1);
-    vmla_s32(a, b, vdup_lane_s32::<LANE>(c))
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t {
+    crate::core_arch::macros::deinterleaving_load!(f16, 8, 4, a)
 }
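On the non-arm path these loads now expand through the crate-internal `deinterleaving_load!` macro instead of per-type LLVM shims; either way the observable behaviour is a deinterleaving load where lane `k` of result vector `j` comes from `ptr[4 * k + j]`. A sketch of that property (hypothetical helper and values, aarch64):

    use core::arch::aarch64::*;

    #[target_feature(enable = "neon")]
    unsafe fn deinterleave_demo() {
        let data: [f32; 8] = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0];
        let r: float32x2x4_t = vld4_f32(data.as_ptr());
        // r.1 gathers data[1] and data[5].
        assert_eq!(vget_lane_f32::<0>(r.1), 1.0);
        assert_eq!(vget_lane_f32::<1>(r.1), 5.0);
    }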
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
-)]
-#[rustc_legacy_const_generics(3)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmla_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
-    static_assert_uimm_bits!(LANE, 1);
-    vmla_u32(a, b, vdup_lane_u32::<LANE>(c))
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(ld4))]
+pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t {
+    crate::core_arch::macros::deinterleaving_load!(f32, 2, 4, a)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
-)]
-#[rustc_legacy_const_generics(3)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmla_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    vmla_s32(a, b, vdup_laneq_s32::<LANE>(c))
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(ld4))]
+pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t {
+    crate::core_arch::macros::deinterleaving_load!(f32, 4, 4, a)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
-)]
-#[rustc_legacy_const_generics(3)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmla_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    vmla_u32(a, b, vdup_laneq_u32::<LANE>(c))
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(ld4))]
+pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t {
+    crate::core_arch::macros::deinterleaving_load!(i8, 8, 4, a)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
-)]
-#[rustc_legacy_const_generics(3)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmlaq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
-    static_assert_uimm_bits!(LANE, 1);
-    vmlaq_s32(a, b, vdupq_lane_s32::<LANE>(c))
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(ld4))]
+pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t {
+    crate::core_arch::macros::deinterleaving_load!(i8, 16, 4, a)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
-)]
-#[rustc_legacy_const_generics(3)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmlaq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t {
-    static_assert_uimm_bits!(LANE, 1);
-    vmlaq_u32(a, b, vdupq_lane_u32::<LANE>(c))
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(ld4))]
+pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t {
+    crate::core_arch::macros::deinterleaving_load!(i16, 4, 4, a)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
-)]
-#[rustc_legacy_const_generics(3)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmlaq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
-    static_assert_uimm_bits!(LANE, 2);
-    vmlaq_s32(a, b, vdupq_laneq_s32::<LANE>(c))
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(ld4))]
+pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t {
+    crate::core_arch::macros::deinterleaving_load!(i16, 8, 4, a)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla, LANE = 1)
-)]
-#[rustc_legacy_const_generics(3)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmlaq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
-    static_assert_uimm_bits!(LANE, 2);
-    vmlaq_u32(a, b, vdupq_laneq_u32::<LANE>(c))
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(ld4))]
+pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t {
+    crate::core_arch::macros::deinterleaving_load!(i32, 2, 4, a)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(fmul)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t {
-    vmla_f32(a, b, vdup_n_f32(c))
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(ld4))]
+pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t {
+    crate::core_arch::macros::deinterleaving_load!(i32, 4, 4, a)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(fmul)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t {
-    vmlaq_f32(a, b, vdupq_n_f32(c))
+#[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vld4))]
+pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0")]
+        fn _vld4_f32(ptr: *const i8, size: i32) -> float32x2x4_t;
+    }
+    _vld4_f32(a as *const i8, 4)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t {
-    vmla_s16(a, b, vdup_n_s16(c))
+#[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vld4))]
+pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0")]
+        fn _vld4q_f32(ptr: *const i8, size: i32) -> float32x4x4_t;
+    }
+    _vld4q_f32(a as *const i8, 4)
 }
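On the arm (v7) path the same loads go through `llvm.arm.neon.vld4.*`, which takes an untyped `*const i8` plus an element-size argument (the trailing 1, 2 or 4 above); the public signatures are identical on both paths, so user code ports directly. Sketch (hypothetical helper and values; on arm these intrinsics are nightly-only under `stdarch_arm_neon_intrinsics`):

    #[cfg(target_arch = "aarch64")]
    use core::arch::aarch64::*;
    #[cfg(target_arch = "arm")]
    use core::arch::arm::*;

    #[target_feature(enable = "neon")]
    unsafe fn portable_demo() {
        let data: [i16; 16] = core::array::from_fn(|i| i as i16);
        let r: int16x4x4_t = vld4_s16(data.as_ptr());
        // Lane k of r.j holds data[4 * k + j], so r.2 == [2, 6, 10, 14].
        assert_eq!(vget_lane_s16::<3>(r.2), 14);
    }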
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t {
-    vmlaq_s16(a, b, vdupq_n_s16(c))
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vld4))]
+pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0")]
+        fn _vld4_s8(ptr: *const i8, size: i32) -> int8x8x4_t;
+    }
+    _vld4_s8(a as *const i8, 1)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t {
-    vmla_u16(a, b, vdup_n_u16(c))
+#[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vld4))]
+pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0")]
+        fn _vld4q_s8(ptr: *const i8, size: i32) -> int8x16x4_t;
+    }
+    _vld4q_s8(a as *const i8, 1)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t {
-    vmlaq_u16(a, b, vdupq_n_u16(c))
+#[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vld4))]
+pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0")]
+        fn _vld4_s16(ptr: *const i8, size: i32) -> int16x4x4_t;
+    }
+    _vld4_s16(a as *const i8, 2)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t {
-    vmla_s32(a, b, vdup_n_s32(c))
+#[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vld4))]
+pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0")]
+        fn _vld4q_s16(ptr: *const i8, size: i32) -> int16x8x4_t;
+    }
+    _vld4q_s16(a as *const i8, 2)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t {
-    vmlaq_s32(a, b, vdupq_n_s32(c))
+#[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vld4))]
+pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0")]
+        fn _vld4_s32(ptr: *const i8, size: i32) -> int32x2x4_t;
+    }
+    _vld4_s32(a as *const i8, 4)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t {
-    vmla_u32(a, b, vdup_n_u32(c))
+#[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vld4))]
+pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0")]
+        fn _vld4q_s32(ptr: *const i8, size: i32) -> int32x4x4_t;
+    }
+    _vld4q_s32(a as *const i8, 4)
 }
-#[doc = "Vector multiply accumulate with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t {
-    vmlaq_u32(a, b, vdupq_n_u32(c))
+#[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
+#[rustc_legacy_const_generics(2)]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vld4_lane_f16<const LANE: i32>(a: *const f16, b: float16x4x4_t) -> float16x4x4_t {
+    static_assert_uimm_bits!(LANE, 2);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f16.p0")]
+        fn _vld4_lane_f16(
+            ptr: *const f16,
+            a: float16x4_t,
+            b: float16x4_t,
+            c: float16x4_t,
+            d: float16x4_t,
+            n: i32,
+            size: i32,
+        ) -> float16x4x4_t;
+    }
+    _vld4_lane_f16(a as _, b.0, b.1, b.2, b.3, LANE, 2)
 }
-#[doc = "Multiply-add to accumulator"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
-    unsafe { simd_add(a, simd_mul(b, c)) }
+#[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
+#[rustc_legacy_const_generics(2)]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vld4q_lane_f16<const LANE: i32>(a: *const f16, b: float16x8x4_t) -> float16x8x4_t {
+    static_assert_uimm_bits!(LANE, 3);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8f16.p0")]
+        fn _vld4q_lane_f16(
+            ptr: *const f16,
+            a: float16x8_t,
+            b: float16x8_t,
+            c: float16x8_t,
+            d: float16x8_t,
+            n: i32,
+            size: i32,
+        ) -> float16x8x4_t;
+    }
+    _vld4q_lane_f16(a as _, b.0, b.1, b.2, b.3, LANE, 2)
 }
-#[doc = "Multiply-add to accumulator"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"]
+#[doc = "Load multiple 4-element structures to four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))]
+#[cfg(not(target_arch = "arm"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mla)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+    assert_instr(ld4, LANE = 0)
 )]
-pub fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
-    unsafe { simd_add(a, simd_mul(b, c)) }
+#[rustc_legacy_const_generics(2)]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vld4_lane_f16<const LANE: i32>(a: *const f16, b: float16x4x4_t) -> float16x4x4_t {
+    static_assert_uimm_bits!(LANE, 2);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.ld4lane.v4f16.p0"
+        )]
+        fn _vld4_lane_f16(
+            a: float16x4_t,
+            b: float16x4_t,
+            c: float16x4_t,
+            d: float16x4_t,
+            n: i64,
+            ptr: *const f16,
+        ) -> float16x4x4_t;
+    }
+    _vld4_lane_f16(b.0, b.1, b.2, b.3, LANE as i64, a as _)
 }
float16x8x4_t) -> float16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v8f16.p0" + )] + fn _vld4q_lane_f16( + a: float16x8_t, + b: float16x8_t, + c: float16x8_t, + d: float16x8_t, + n: i64, + ptr: *const f16, + ) -> float16x8x4_t; + } + _vld4q_lane_f16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe { simd_add(a, simd_mul(b, c)) } +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0" + )] + fn _vld4_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i64, + ptr: *const i8, + ) -> float32x2x4_t; + } + _vld4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - unsafe { simd_add(a, simd_mul(b, c)) } +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = 
"llvm.aarch64.neon.ld4lane.v4f32.p0" + )] + fn _vld4q_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i64, + ptr: *const i8, + ) -> float32x4x4_t; + } + _vld4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe { simd_add(a, simd_mul(b, c)) } +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0" + )] + fn _vld4_lane_s8( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i64, + ptr: *const i8, + ) -> int8x8x4_t; + } + _vld4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0" + )] + fn _vld4_lane_s16( + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i64, + ptr: *const i8, + ) -> int16x4x4_t; + } + _vld4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, 
assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0" + )] + fn _vld4q_lane_s16( + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i64, + ptr: *const i8, + ) -> int16x8x4_t; + } + _vld4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0" + )] + fn _vld4_lane_s32( + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i64, + ptr: *const i8, + ) -> int32x2x4_t; + } + _vld4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(ld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0" + )] + fn _vld4q_lane_s32( + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i64, + ptr: *const i8, + ) -> int32x4x4_t; + } + _vld4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0")] + fn _vld4_lane_f32( + ptr: *const i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i32, + size: i32, + ) -> float32x2x4_t; + } + _vld4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's 
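Note the FFI asymmetry the wrappers hide: the arm shims take the pointer first with `LANE` as `i32` plus an element-size argument, while the aarch64 shims take the pointer last with `LANE as i64`. The user-visible behaviour is identical; for instance a store/load round-trip with the matching `vst4_f32` store intrinsic (defined elsewhere in this file) is the identity (hypothetical helper, aarch64):

    use core::arch::aarch64::*;

    #[target_feature(enable = "neon")]
    unsafe fn round_trip(v: float32x2x4_t) -> float32x2x4_t {
        let mut buf = [0.0f32; 8];
        vst4_f32(buf.as_mut_ptr(), v); // interleave to memory
        vld4_f32(buf.as_ptr())         // deinterleave back
    }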
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0")] + fn _vld4q_lane_f32( + ptr: *const i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + n: i32, + size: i32, + ) -> float32x4x4_t; + } + _vld4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0")] + fn _vld4_lane_s8( + ptr: *const i8, + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + n: i32, + size: i32, + ) -> int8x8x4_t; + } + _vld4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0")] + fn _vld4_lane_s16( + ptr: *const i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + n: i32, + size: i32, + ) -> int16x4x4_t; + } + _vld4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0")] + fn _vld4q_lane_s16( + ptr: *const i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + n: i32, + size: i32, + ) -> int16x8x4_t; + } + _vld4q_lane_s16(a as _, b.0, b.1, b.2, b.3, 
LANE, 2) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0")] + fn _vld4_lane_s32( + ptr: *const i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + n: i32, + size: i32, + ) -> int32x2x4_t; + } + _vld4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[cfg_attr(test, assert_instr(vld4, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0")] + fn _vld4q_lane_s32( + ptr: *const i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + n: i32, + size: i32, + ) -> int32x4x4_t; + } + _vld4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25688,19 +26997,23 @@ pub fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25709,19 +27022,23 @@ pub fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25730,19 +27047,23 @@ pub fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25751,19 +27072,23 @@ pub fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { + static_assert_uimm_bits!(LANE, 1); + 
transmute(vld4_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25772,19 +27097,23 @@ pub fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { + static_assert_uimm_bits!(LANE, 2); + transmute(vld4q_lane_s32::(transmute(a), transmute(b))) } -#[doc = "Multiply-add to accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mla) + assert_instr(ld4, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25793,20 +27122,23 @@ pub fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe { simd_add(a, simd_mul(b, c)) } +pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { + static_assert_uimm_bits!(LANE, 3); + transmute(vld4_lane_s8::(transmute(a), transmute(b))) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) + assert_instr(ld4, LANE = 0) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25815,21 +27147,23 @@ pub fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { +pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t { static_assert_uimm_bits!(LANE, 2); - vmlal_s16(a, b, vdup_lane_s16::(c)) + transmute(vld4_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) + assert_instr(ld4, LANE = 0) )] -#[rustc_legacy_const_generics(3)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25838,21 +27172,22 @@ pub fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { +pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { static_assert_uimm_bits!(LANE, 3); - vmlal_s16(a, b, vdup_laneq_s16::(c)) + transmute(vld4q_lane_s16::(transmute(a), transmute(b))) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) + assert_instr(nop) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25861,21 +27196,49 @@ pub fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - 
static_assert_uimm_bits!(LANE, 1); - vmlal_s32(a, b, vdup_lane_s32::(c)) +pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { + transmute(vld4_s64(transmute(a))) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { + crate::ptr::read_unaligned(a.cast()) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0")] + fn _vld4_s64(ptr: *const i8, size: i32) -> int64x1x4_t; + } + _vld4_s64(a as *const i8, 8) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal, LANE = 1) + assert_instr(nop) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25884,21 +27247,21 @@ pub fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_s32(a, b, vdup_laneq_s32::(c)) +pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { + transmute(vld4_s64(transmute(a))) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) + assert_instr(ld4) )] 
-#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25907,21 +27270,21 @@ pub fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_u16(a, b, vdup_lane_u16::(c)) +pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { + transmute(vld4_s8(transmute(a))) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) + assert_instr(ld4) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25930,21 +27293,21 @@ pub fn vmlal_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 3); - vmlal_u16(a, b, vdup_laneq_u16::(c)) -} -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] +pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { + transmute(vld4q_s8(transmute(a))) +} +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) + assert_instr(ld4) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25953,21 +27316,21 @@ pub fn vmlal_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlal_u32(a, b, vdup_lane_u32::(c)) +pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { + transmute(vld4_s16(transmute(a))) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal, LANE = 1) + assert_instr(ld4) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -25976,19 +27339,20 @@ pub fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlal_u32(a, b, vdup_laneq_u32::(c)) +pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { + transmute(vld4q_s16(transmute(a))) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -25998,18 +27362,20 @@ pub fn vmlal_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vmlal_s16(a, b, vdup_n_s16(c)) +pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { + transmute(vld4_s32(transmute(a))) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -26019,18 +27385,20 @@ pub fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vmlal_s32(a, b, vdup_n_s32(c)) +pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { + transmute(vld4q_s32(transmute(a))) } -#[doc = "Vector widening 
multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -26040,18 +27408,20 @@ pub fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { - vmlal_u16(a, b, vdup_n_u16(c)) +pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { + transmute(vld4_s8(transmute(a))) } -#[doc = "Vector widening multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -26061,18 +27431,20 @@ pub fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { - vmlal_u32(a, b, vdup_n_u32(c)) +pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { + transmute(vld4q_s8(transmute(a))) } -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -26082,18 +27454,20 @@ pub fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { - unsafe { simd_add(a, vmull_s8(b, c)) } +pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { + 
transmute(vld4_s16(transmute(a))) } -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] +#[doc = "Load multiple 4-element structures to four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(ld4) )] #[cfg_attr( not(target_arch = "arm"), @@ -26103,18 +27477,20 @@ pub fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - unsafe { simd_add(a, vmull_s16(b, c)) } +pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { + transmute(vld4q_s16(transmute(a))) } -#[doc = "Signed multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] +#[doc = "Store SIMD&FP register (immediate offset)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldrq_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlal) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -26124,60 +27500,78 @@ pub fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - unsafe { simd_add(a, vmull_s32(b, c)) } +pub unsafe fn vldrq_p128(a: *const p128) -> p128 { + *a } -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(fmax) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { - unsafe { simd_add(a, vmull_u8(b, c)) } +#[cfg(not(target_arch = 
"arm64ec"))] +pub fn vmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v4f16" + )] + fn _vmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + } + unsafe { _vmax_f16(a, b) } } -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(fmax) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - unsafe { simd_add(a, vmull_u16(b, c)) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v8f16" + )] + fn _vmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + } + unsafe { _vmaxq_f16(a, b) } } -#[doc = "Unsigned multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlal) + assert_instr(fmax) )] #[cfg_attr( not(target_arch = "arm"), @@ -26187,18 +27581,26 @@ pub fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - unsafe { simd_add(a, vmull_u32(b, c)) } +pub fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v2f32" + )] + fn _vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { _vmax_f32(a, b) } } -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(fmax) )] #[cfg_attr( not(target_arch = "arm"), @@ -26208,18 +27610,26 @@ pub fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmax.v4f32" + )] + fn _vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { _vmaxq_f32(a, b) } } -#[doc = "Floating-point multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(smax) )] #[cfg_attr( not(target_arch = "arm"), @@ -26229,20 +27639,22 @@ pub fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { + let mask: int8x8_t = simd_ge(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(smax) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26251,25 +27663,22 @@ pub fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") 
)] -pub fn vmls_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmls_f32(a, b, vdup_lane_f32::(c)) +pub fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { + let mask: int8x16_t = simd_ge(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(smax) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26278,25 +27687,22 @@ pub fn vmls_lane_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, -) -> float32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmls_f32(a, b, vdup_laneq_f32::(c)) +pub fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { + let mask: int16x4_t = simd_ge(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + assert_instr(smax) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26305,25 +27711,22 @@ pub fn vmls_laneq_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlsq_f32(a, b, vdupq_lane_f32::(c)) +pub fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { + let mask: int16x8_t = simd_ge(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 1) + 
assert_instr(smax) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26332,25 +27735,22 @@ pub fn vmlsq_lane_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, -) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_f32(a, b, vdupq_laneq_f32::(c)) +pub fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { + let mask: int32x2_t = simd_ge(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(smax) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26359,21 +27759,22 @@ pub fn vmlsq_laneq_f32( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - vmls_s16(a, b, vdup_lane_s16::(c)) +pub fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { + let mask: int32x4_t = simd_ge(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(umax) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26382,21 +27783,22 @@ pub fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - vmls_u16(a, b, vdup_lane_u16::(c)) +pub fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { + let mask: uint8x8_t = simd_ge(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(umax) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26405,21 +27807,22 @@ pub fn vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - vmls_s16(a, b, vdup_laneq_s16::(c)) +pub fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + let mask: uint8x16_t = simd_ge(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(umax) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26428,21 +27831,22 @@ pub fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - vmls_u16(a, b, vdup_laneq_u16::(c)) +pub fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { + let mask: uint16x4_t = simd_ge(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(umax) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26451,21 +27855,22 @@ pub fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_s16(a, b, vdupq_lane_s16::(c)) +pub fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { + let mask: uint16x8_t = simd_ge(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector 
multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(umax) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26474,21 +27879,22 @@ pub fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_u16(a, b, vdupq_lane_u16::(c)) +pub fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { + let mask: uint32x2_t = simd_ge(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] +#[doc = "Maximum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(umax) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26497,67 +27903,82 @@ pub fn vmlsq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - vmlsq_s16(a, b, vdupq_laneq_s16::(c)) +pub fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { + let mask: uint32x4_t = simd_ge(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"] +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(fmaxnm) )] -#[rustc_legacy_const_generics(3)] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = 
"neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - vmlsq_u16(a, b, vdupq_laneq_u16::(c)) +#[cfg(not(target_arch = "arm64ec"))] +pub fn vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v4f16" + )] + fn _vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + } + unsafe { _vmaxnm_f16(a, b) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"] +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(fmaxnm) )] -#[rustc_legacy_const_generics(3)] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmls_s32(a, b, vdup_lane_s32::(c)) +#[cfg(not(target_arch = "arm64ec"))] +pub fn vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v8f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v8f16" + )] + fn _vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + } + unsafe { _vmaxnmq_f16(a, b) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"] +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(fmaxnm) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26566,21 +27987,27 @@ pub fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) 
target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - vmls_u32(a, b, vdup_lane_u32::(c)) +pub fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v2f32" + )] + fn _vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { _vmaxnm_f32(a, b) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"] +#[doc = "Floating-point Maximum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(fmaxnm) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26589,67 +28016,87 @@ pub fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmls_s32(a, b, vdup_laneq_s32::(c)) +pub fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmaxnm.v4f32" + )] + fn _vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { _vmaxnmq_f32(a, b) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(fmin) )] -#[rustc_legacy_const_generics(3)] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - vmls_u32(a, b, 
vdup_laneq_u32::(c)) +#[cfg(not(target_arch = "arm64ec"))] +pub fn vmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v4f16" + )] + fn _vmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + } + unsafe { _vmin_f16(a, b) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(fmin) )] -#[rustc_legacy_const_generics(3)] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlsq_s32(a, b, vdupq_lane_s32::(c)) +#[cfg(not(target_arch = "arm64ec"))] +pub fn vminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v8f16" + )] + fn _vminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + } + unsafe { _vminq_f16(a, b) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(fmin) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26658,21 +28105,27 @@ pub fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 1); - vmlsq_u32(a, b, vdupq_lane_u32::(c)) +pub fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + 
link_name = "llvm.aarch64.neon.fmin.v2f32" + )] + fn _vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { _vmin_f32(a, b) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(fmin) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26681,21 +28134,27 @@ pub fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_s32(a, b, vdupq_laneq_s32::(c)) +pub fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fmin.v4f32" + )] + fn _vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { _vminq_f32(a, b) } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls, LANE = 1) + assert_instr(smin) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -26704,19 +28163,21 @@ pub fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - vmlsq_u32(a, b, vdupq_laneq_u32::(c)) +pub fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { + let mask: int8x8_t = simd_le(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -26726,18 +28187,21 @@ pub fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { - vmls_f32(a, b, vdup_n_f32(c)) +pub fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { + let mask: int8x16_t = simd_le(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -26747,18 +28211,21 @@ pub fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { - vmlsq_f32(a, b, vdupq_n_f32(c)) +pub fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { + let mask: int16x4_t = simd_le(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -26768,18 +28235,21 @@ pub fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { - vmls_s16(a, b, vdup_n_s16(c)) +pub fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { + let mask: int16x8_t = simd_le(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(mls) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -26789,18 +28259,21 @@ pub fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { - vmlsq_s16(a, b, vdupq_n_s16(c)) +pub fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { + let mask: int32x2_t = simd_le(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(smin) )] #[cfg_attr( not(target_arch = "arm"), @@ -26810,18 +28283,21 @@ pub fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { - vmls_u16(a, b, vdup_n_u16(c)) +pub fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { + let mask: int32x4_t = simd_le(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -26831,18 +28307,21 @@ pub fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { - vmlsq_u16(a, b, vdupq_n_u16(c)) +pub fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { + let mask: uint8x8_t = simd_le(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -26852,18 +28331,21 @@ pub fn 
vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { - vmls_s32(a, b, vdup_n_s32(c)) +pub fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + let mask: uint8x16_t = simd_le(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -26873,18 +28355,21 @@ pub fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { - vmlsq_s32(a, b, vdupq_n_s32(c)) +pub fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { + let mask: uint16x4_t = simd_le(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -26894,18 +28379,21 @@ pub fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { - vmls_u32(a, b, vdup_n_u32(c)) +pub fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { + let mask: uint16x8_t = simd_le(a, b); + simd_select(mask, a, b) + } } -#[doc = "Vector multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -26915,18 +28403,21 @@ pub fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { - vmlsq_u32(a, b, vdupq_n_u32(c)) +pub fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { + let mask: uint32x2_t = simd_le(a, b); + simd_select(mask, a, b) + } } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"] +#[doc = "Minimum (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(umin) )] #[cfg_attr( not(target_arch = "arm"), @@ -26936,60 +28427,81 @@ pub fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { + let mask: uint32x4_t = simd_le(a, b); + simd_select(mask, a, b) + } } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"] +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(fminnm) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v4f16" + )] + fn _vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + } + unsafe { _vminnm_f16(a, b) } } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"] +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vmls.i16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(fminnm) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v8f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v8f16" + )] + fn _vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + } + unsafe { _vminnmq_f16(a, b) } } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"] +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(fminnm) )] #[cfg_attr( not(target_arch = "arm"), @@ -26999,18 +28511,26 @@ pub fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v2f32" + )] + fn _vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { _vminnm_f32(a, b) } } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"] +#[doc = "Floating-point Minimum Number (vector)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(fminnm) )] #[cfg_attr( not(target_arch = "arm"), @@ -27020,18 +28540,26 @@ pub fn vmlsq_s16(a: int16x8_t, b: int16x8_t, 
c: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminnm.v4f32" + )] + fn _vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { _vminnmq_f32(a, b) } } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"] +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -27041,18 +28569,18 @@ pub fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"] +#[doc = "Floating-point multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -27062,19 +28590,20 @@ pub fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", 
LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -27083,19 +28612,25 @@ pub fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmla_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmla_f32(a, b, vdup_lane_f32::(c)) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -27104,19 +28639,25 @@ pub fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmla_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmla_f32(a, b, vdup_laneq_f32::(c)) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -27125,19 +28666,25 @@ pub fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmlaq_lane_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlaq_f32(a, b, vdupq_lane_f32::(c)) } -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -27146,19 +28693,25 @@ pub fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { - unsafe { simd_sub(a, simd_mul(b, c)) } -} -#[doc = "Multiply-subtract from accumulator"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"] +pub fn vmlaq_laneq_f32( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_f32(a, b, vdupq_laneq_f32::(c)) +} +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mls) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -27167,18 +28720,19 @@ pub fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { - unsafe { simd_sub(a, simd_mul(b, c)) } +pub fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + vmla_s16(a, b, vdup_lane_s16::(c)) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) + assert_instr(mla, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -27189,19 +28743,19 @@ pub fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { +pub fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(LANE, 2); - vmlsl_s16(a, b, vdup_lane_s16::(c)) + vmla_u16(a, b, vdup_lane_u16::(c)) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) + assert_instr(mla, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -27212,19 +28766,19 @@ pub fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { +pub fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { static_assert_uimm_bits!(LANE, 3); - vmlsl_s16(a, b, vdup_laneq_s16::(c)) + vmla_s16(a, b, vdup_laneq_s16::(c)) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) + assert_instr(mla, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -27235,19 +28789,19 @@ pub fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlsl_s32(a, b, vdup_lane_s32::(c)) +pub fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + vmla_u16(a, b, vdup_laneq_u16::(c)) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), - assert_instr(smlsl, LANE = 1) + assert_instr(mla, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -27258,19 +28812,19 @@ pub fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { +pub fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 2); - vmlsl_s32(a, b, vdup_laneq_s32::(c)) + vmlaq_s16(a, b, vdupq_lane_s16::(c)) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) + assert_instr(mla, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -27281,19 +28835,19 @@ pub fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { +pub fn vmlaq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { static_assert_uimm_bits!(LANE, 2); - vmlsl_u16(a, b, vdup_lane_u16::(c)) + vmlaq_u16(a, b, vdupq_lane_u16::(c)) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) + assert_instr(mla, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -27304,19 +28858,19 @@ pub fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { +pub fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 3); - vmlsl_u16(a, b, vdup_laneq_u16::(c)) + vmlaq_s16(a, b, vdupq_laneq_s16::(c)) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"] #[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) + assert_instr(mla, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -27327,19 +28881,19 @@ pub fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - vmlsl_u32(a, b, vdup_lane_u32::(c)) +pub fn vmlaq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + vmlaq_u16(a, b, vdupq_laneq_u16::(c)) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl, LANE = 1) + assert_instr(mla, LANE = 1) )] #[rustc_legacy_const_generics(3)] #[cfg_attr( @@ -27350,20 +28904,21 @@ pub fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 2); - vmlsl_u32(a, b, vdup_laneq_u32::(c)) +pub fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmla_s32(a, b, vdup_lane_s32::(c)) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -27372,19 +28927,21 @@ pub fn vmlsl_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vmlsl_s16(a, b, vdup_n_s16(c)) +pub fn vmla_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmla_u32(a, b, vdup_lane_u32::(c)) } -#[doc = "Vector 
widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -27393,19 +28950,21 @@ pub fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vmlsl_s32(a, b, vdup_n_s32(c)) +pub fn vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmla_s32(a, b, vdup_laneq_s32::(c)) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -27414,19 +28973,21 @@ pub fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { - vmlsl_u16(a, b, vdup_n_u16(c)) +pub fn vmla_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmla_u32(a, b, vdup_laneq_u32::(c)) } -#[doc = "Vector widening multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -27435,19 +28996,21 @@ pub fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, 
c: u16) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { - vmlsl_u32(a, b, vdup_n_u32(c)) +pub fn vmlaq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlaq_s32(a, b, vdupq_lane_s32::(c)) } -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -27456,19 +29019,21 @@ pub fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { - unsafe { simd_sub(a, vmull_s8(b, c)) } +pub fn vmlaq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlaq_u32(a, b, vdupq_lane_u32::(c)) } -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -27477,19 +29042,21 @@ pub fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - unsafe { simd_sub(a, vmull_s16(b, c)) } +pub fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_s32(a, b, vdupq_laneq_s32::(c)) } -#[doc = "Signed multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmla.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smlsl) + assert_instr(mla, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -27498,18 +29065,19 @@ pub fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - unsafe { simd_sub(a, vmull_s32(b, c)) } +pub fn vmlaq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlaq_u32(a, b, vdupq_laneq_u32::(c)) } -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -27519,18 +29087,18 @@ pub fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { - unsafe { simd_sub(a, vmull_u8(b, c)) } +pub fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vmla_f32(a, b, vdup_n_f32(c)) } -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -27540,18 +29108,18 @@ pub fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { - unsafe { simd_sub(a, vmull_u16(b, c)) } +pub fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vmlaq_f32(a, b, vdupq_n_f32(c)) } -#[doc = "Unsigned multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vmlsl.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umlsl) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27561,108 +29129,81 @@ pub fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { - unsafe { simd_sub(a, vmull_u32(b, c)) } +pub fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { + vmla_s16(a, b, vdup_n_s16(c)) } -#[doc = "8-bit integer matrix multiply-accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_s32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smmla) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")] - fn _vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; - } - unsafe { _vmmlaq_s32(a, b, c) } +pub fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { + vmlaq_s16(a, b, vdupq_n_s16(c)) } -#[doc = "8-bit integer matrix multiply-accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_u32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ummla) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - 
link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")] - fn _vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t; - } - unsafe { _vmmlaq_u32(a, b, c) } +pub fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { + vmla_u16(a, b, vdup_n_u16(c)) } -#[doc = "Duplicate element to vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_f16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vmov_n_f16(a: f16) -> float16x4_t { - vdup_n_f16(a) -} -#[doc = "Duplicate element to vector"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_f16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vmovq_n_f16(a: f16) -> float16x8_t { - vdupq_n_f16(a) +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { + vmlaq_u16(a, b, vdupq_n_u16(c)) } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_f32)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27672,18 +29213,18 @@ pub fn vmovq_n_f16(a: f16) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmov_n_f32(value: f32) -> float32x2_t { - vdup_n_f32(value) +pub fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { + vmla_s32(a, b, vdup_n_s32(c)) } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_p16)"] +#[doc = "Vector multiply accumulate with scalar"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27693,18 +29234,18 @@ pub fn vmov_n_f32(value: f32) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmov_n_p16(value: p16) -> poly16x4_t { - vdup_n_p16(value) +pub fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { + vmlaq_s32(a, b, vdupq_n_s32(c)) } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_p8)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27714,18 +29255,18 @@ pub fn vmov_n_p16(value: p16) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmov_n_p8(value: p8) -> poly8x8_t { - vdup_n_p8(value) +pub fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { + vmla_u32(a, b, vdup_n_u32(c)) } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s16)"] +#[doc = "Vector multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27735,18 +29276,18 @@ pub fn vmov_n_p8(value: p8) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmov_n_s16(value: i16) -> int16x4_t { - vdup_n_s16(value) +pub fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { + vmlaq_u32(a, b, vdupq_n_u32(c)) } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s32)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27756,18 +29297,18 @@ pub fn vmov_n_s16(value: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmov_n_s32(value: i32) -> int32x2_t { - vdup_n_s32(value) +pub fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s64)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmov) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27777,18 +29318,18 @@ pub fn vmov_n_s32(value: i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmov_n_s64(value: i64) -> int64x1_t { - vdup_n_s64(value) +pub fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s8)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27798,18 +29339,18 @@ pub fn vmov_n_s64(value: i64) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmov_n_s8(value: i8) -> int8x8_t { - vdup_n_s8(value) +pub fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u16)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27819,18 +29360,18 @@ pub fn vmov_n_s8(value: i8) -> int8x8_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmov_n_u16(value: u16) -> uint16x4_t { - vdup_n_u16(value) +pub fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u32)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27840,18 +29381,18 @@ pub fn vmov_n_u16(value: u16) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmov_n_u32(value: u32) -> uint32x2_t { - vdup_n_u32(value) +pub fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u64)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmov) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27861,18 +29402,18 @@ pub fn vmov_n_u32(value: u32) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmov_n_u64(value: u64) -> uint64x1_t { - vdup_n_u64(value) +pub fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u8)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27882,18 +29423,18 @@ pub fn vmov_n_u64(value: u64) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmov_n_u8(value: u8) -> uint8x8_t { - vdup_n_u8(value) +pub fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_f32)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27903,18 +29444,18 @@ pub fn vmov_n_u8(value: u8) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovq_n_f32(value: f32) -> float32x4_t { - vdupq_n_f32(value) +pub fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_p16)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27924,18 +29465,18 @@ pub fn vmovq_n_f32(value: f32) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovq_n_p16(value: p16) -> poly16x8_t { - vdupq_n_p16(value) +pub fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_p8)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27945,18 +29486,18 @@ pub fn vmovq_n_p16(value: p16) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovq_n_p8(value: p8) -> poly8x16_t { - vdupq_n_p8(value) +pub fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s16)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] #[inline] #[target_feature(enable = "neon")] 
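(The plain `vmla*` bodies in this hunk are generic `simd_add(a, simd_mul(b, c))`; the `assert_instr(mla)` / `assert_instr("vmla.i16")` checks rely on the backend fusing the multiply and add back into a single instruction. A usage sketch for one of them; the `mla_demo` wrapper and its scaffolding are illustrative, not part of the patch:

    #[cfg(target_arch = "aarch64")]
    #[target_feature(enable = "neon")]
    unsafe fn mla_demo() -> [u16; 8] {
        use core::arch::aarch64::*;
        unsafe {
            let data: [u16; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
            let a = vld1q_u16(data.as_ptr());
            let b = vdupq_n_u16(10);
            let c = vdupq_n_u16(3);
            // Each lane computes a + b * c, so lane i of the result is (i + 1) + 30.
            let r = vmlaq_u16(a, b, c);
            let mut out = [0u16; 8];
            vst1q_u16(out.as_mut_ptr(), r);
            out
        }
    }

A caller would gate this behind `is_aarch64_feature_detected!("neon")` or a statically enabled `neon` target feature.)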
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27966,18 +29507,18 @@ pub fn vmovq_n_p8(value: p8) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovq_n_s16(value: i16) -> int16x8_t { - vdupq_n_s16(value) +pub fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s32)"] +#[doc = "Multiply-add to accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(mla) )] #[cfg_attr( not(target_arch = "arm"), @@ -27987,19 +29528,20 @@ pub fn vmovq_n_s16(value: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovq_n_s32(value: i32) -> int32x4_t { - vdupq_n_s32(value) +pub fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe { simd_add(a, simd_mul(b, c)) } } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s64)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(smlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28008,19 +29550,21 @@ pub fn vmovq_n_s32(value: i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovq_n_s64(value: i64) -> int64x2_t { - vdupq_n_s64(value) +pub fn vmlal_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_s16(a, b, vdup_lane_s16::<LANE>(c)) } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s8)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch 
= "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(smlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28029,19 +29573,21 @@ pub fn vmovq_n_s64(value: i64) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovq_n_s8(value: i8) -> int8x16_t { - vdupq_n_s8(value) +pub fn vmlal_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlal_s16(a, b, vdup_laneq_s16::<LANE>(c)) } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u16)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(smlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28050,19 +29596,21 @@ pub fn vmovq_n_s8(value: i8) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovq_n_u16(value: u16) -> uint16x8_t { - vdupq_n_u16(value) +pub fn vmlal_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlal_s32(a, b, vdup_lane_s32::<LANE>(c)) } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u32)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(smlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28071,19 +29619,21 @@ pub fn vmovq_n_u16(value: u16) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovq_n_u32(value: u32) -> uint32x4_t { - vdupq_n_u32(value) +pub fn vmlal_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_s32(a, b, vdup_laneq_s32::<LANE>(c)) } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u64)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(umlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28092,19 +29642,21 @@ pub fn vmovq_n_u32(value: u32) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovq_n_u64(value: u64) -> uint64x2_t { - vdupq_n_u64(value) +pub fn vmlal_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_u16(a, b, vdup_lane_u16::<LANE>(c)) } -#[doc = "Duplicate vector element to vector or scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u8)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(dup) + assert_instr(umlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28113,19 +29665,21 @@ pub fn vmovq_n_u64(value: u64) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovq_n_u8(value: u8) -> uint8x16_t { - vdupq_n_u8(value) +pub fn vmlal_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmlal_u16(a, b, vdup_laneq_u16::<LANE>(c)) } -#[doc = "Vector long move."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_s16)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sxtl) + assert_instr(umlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28134,19 +29688,21 @@ pub fn vmovq_n_u8(value: u8) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovl_s16(a: int16x4_t) -> int32x4_t { - unsafe { simd_cast(a) } +pub fn vmlal_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlal_u32(a, b, vdup_lane_u32::<LANE>(c)) } -#[doc = "Vector long move."] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_s32)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sxtl) + assert_instr(umlal, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28155,18 +29711,19 @@ pub fn vmovl_s16(a: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovl_s32(a: int32x2_t) -> int64x2_t { - unsafe { simd_cast(a) } +pub fn vmlal_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlal_u32(a, b, vdup_laneq_u32::<LANE>(c)) } -#[doc = "Vector long move."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_s8)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sxtl) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -28176,18 +29733,18 @@ pub fn vmovl_s32(a: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovl_s8(a: int8x8_t) -> int16x8_t { - unsafe { simd_cast(a) } +pub fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + vmlal_s16(a, b, vdup_n_s16(c)) } -#[doc = "Vector long move."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_u16)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uxtl) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -28197,18 +29754,18 @@ pub fn vmovl_s8(a: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { - unsafe { simd_cast(a) } +pub fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + vmlal_s32(a, b, vdup_n_s32(c)) } -#[doc = "Vector long move."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_u32)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uxtl) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -28218,18 +29775,18 @@ pub fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { - unsafe { simd_cast(a) } +pub fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { + vmlal_u16(a, b, vdup_n_u16(c)) } -#[doc = "Vector long move."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_u8)"] +#[doc = "Vector widening multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uxtl) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -28239,18 +29796,18 @@ pub fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { - unsafe { simd_cast(a) } +pub fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { + vmlal_u32(a, b, vdup_n_u32(c)) } -#[doc = "Vector narrow integer."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_s16)"] +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -28260,18 +29817,18 @@ pub fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovn_s16(a: int16x8_t) -> int8x8_t { - unsafe { simd_cast(a) } +pub fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + unsafe { simd_add(a, vmull_s8(b, c)) } } -#[doc = "Vector narrow integer."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_s32)"] +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr( 
all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -28281,18 +29838,18 @@ pub fn vmovn_s16(a: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovn_s32(a: int32x4_t) -> int16x4_t { - unsafe { simd_cast(a) } +pub fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + unsafe { simd_add(a, vmull_s16(b, c)) } } -#[doc = "Vector narrow integer."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_s64)"] +#[doc = "Signed multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) + assert_instr(smlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -28302,18 +29859,18 @@ pub fn vmovn_s32(a: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovn_s64(a: int64x2_t) -> int32x2_t { - unsafe { simd_cast(a) } +pub fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + unsafe { simd_add(a, vmull_s32(b, c)) } } -#[doc = "Vector narrow integer."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_u16)"] +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -28323,18 +29880,18 @@ pub fn vmovn_s64(a: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { - unsafe { simd_cast(a) } +pub fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + unsafe { simd_add(a, vmull_u8(b, c)) } } -#[doc = "Vector narrow integer."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_u32)"] +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -28344,18 +29901,18 @@ pub fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { - 
unsafe { simd_cast(a) } +pub fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + unsafe { simd_add(a, vmull_u16(b, c)) } } -#[doc = "Vector narrow integer."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_u64)"] +#[doc = "Unsigned multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(xtn) + assert_instr(umlal) )] #[cfg_attr( not(target_arch = "arm"), @@ -28365,63 +29922,62 @@ pub fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { - unsafe { simd_cast(a) } +pub fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + unsafe { simd_add(a, vmull_u32(b, c)) } } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f16)"] +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f16"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(fmul) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vmul_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe { simd_mul(a, b) } +pub fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f16)"] +#[doc = "Floating-point multiply-subtract from accumulator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f16"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(fmul) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = 
"arm64ec"))] -pub fn vmulq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe { simd_mul(a, b) } +pub fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe { simd_sub(a, simd_mul(b, c)) } } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28430,19 +29986,25 @@ pub fn vmulq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe { simd_mul(a, b) } +pub fn vmls_lane_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + vmls_f32(a, b, vdup_lane_f32::(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28451,68 +30013,79 @@ pub fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe { simd_mul(a, b) } +pub fn vmls_laneq_f32( + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, +) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + vmls_f32(a, b, vdup_laneq_f32::(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(fmul, LANE = 1) )] 
-#[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vmul_lane_f16<const LANE: i32>(a: float16x4_t, v: float16x4_t) -> float16x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { simd_mul(a, vdup_lane_f16::<LANE>(v)) } +pub fn vmlsq_lane_f32<const LANE: i32>( + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, +) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + vmlsq_f32(a, b, vdupq_lane_f32::<LANE>(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(fmul, LANE = 1) )] -#[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vmulq_lane_f16<const LANE: i32>(a: float16x8_t, v: float16x4_t) -> float16x8_t { +pub fn vmlsq_laneq_f32<const LANE: i32>( + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, +) -> float32x4_t { static_assert_uimm_bits!(LANE, 2); - unsafe { simd_mul(a, vdupq_lane_f16::<LANE>(v)) } + vmlsq_f32(a, b, vdupq_laneq_f32::<LANE>(c)) } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) + assert_instr(mls, LANE = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28521,21 +30094,21 @@ pub fn vmulq_lane_f16(a: float16x8_t, v: float16x4_t) -> float1 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmul_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { simd_mul(a, vdup_lane_f32::<LANE>(b)) } +pub fn vmls_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + vmls_s16(a, b, 
vdup_lane_s16::<LANE>(c)) } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) + assert_instr(mls, LANE = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28544,21 +30117,21 @@ pub fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmul_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t { +pub fn vmls_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(LANE, 2); - unsafe { simd_mul(a, vdup_laneq_f32::<LANE>(b)) } + vmls_u16(a, b, vdup_lane_u16::<LANE>(c)) } -#[doc = "Floating-point multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmul, LANE = 0) + assert_instr(mls, LANE = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(3)] 
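(Each lane form guards the index with `static_assert_uimm_bits!(LANE, N)`, which accepts 0..2^N, matching the lane count of the vector the index selects from: 2 bits for a 4-lane `int16x4_t`, 3 bits for an 8-lane `int16x8_t`, and so on. A reference model of the semantics, illustrative only and not the generated implementation:

    // out[i] = a[i] - b[i] * c[LANE], with LANE checked at compile time.
    fn vmls_lane_s16_model<const LANE: usize>(
        a: [i16; 4],
        b: [i16; 4],
        c: [i16; 4],
    ) -> [i16; 4] {
        let mut out = [0i16; 4];
        for i in 0..4 {
            out[i] = a[i].wrapping_sub(b[i].wrapping_mul(c[LANE]));
        }
        out
    }

    fn main() {
        // With LANE = 1, every output lane subtracts b[i] * c[1].
        assert_eq!(
            vmls_lane_s16_model::<1>([10, 10, 10, 10], [1, 2, 3, 4], [0, 2, 0, 0]),
            [8, 6, 4, 2]
        );
    }
)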
#[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28590,21 +30163,21 @@ pub fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float3 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmulq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { simd_mul(a, vdupq_laneq_f32::<LANE>(b)) } +pub fn vmls_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + vmls_u16(a, b, vdup_laneq_u16::<LANE>(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(mls, LANE = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28613,21 +30186,21 @@ pub fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmul_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { +pub fn vmlsq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 2); - unsafe { simd_mul(a, vdup_lane_s16::<LANE>(b)) } + vmlsq_s16(a, b, vdupq_lane_s16::<LANE>(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(mls, LANE = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28636,21 +30209,21 @@ pub fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmulq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t { +pub fn vmlsq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { static_assert_uimm_bits!(LANE, 2); - unsafe { simd_mul(a, vdupq_lane_s16::<LANE>(b)) } + vmlsq_u16(a, b, vdupq_lane_u16::<LANE>(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] 
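(The `_lane_` / `_laneq_` split here is only about where the index comes from: `_lane_` selects from a 64-bit source vector, `_laneq_` from a 128-bit one, hence the 2-bit vs 3-bit `static_assert_uimm_bits!` bounds; both feed the same `vmlsq_*` body after duplicating the chosen lane. A sketch of the aarch64 path; the intrinsic names come from the patch, the wrapper is illustrative:

    #[cfg(target_arch = "aarch64")]
    use core::arch::aarch64::*;

    #[cfg(target_arch = "aarch64")]
    #[target_feature(enable = "neon")]
    unsafe fn mls_lane_vs_laneq(
        a: int16x8_t,
        b: int16x8_t,
        c4: int16x4_t,
        c8: int16x8_t,
    ) -> (int16x8_t, int16x8_t) {
        // LANE may be 0..=3 for the 4-lane source, 0..=7 for the 8-lane one.
        (vmlsq_lane_s16::<3>(a, b, c4), vmlsq_laneq_s16::<7>(a, b, c8))
    }
)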
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul, LANE = 1)
+    assert_instr(mls, LANE = 1)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -28659,21 +30232,21 @@ pub fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
-    static_assert_uimm_bits!(LANE, 1);
-    unsafe { simd_mul(a, vdup_lane_s32::<LANE>(b)) }
+pub fn vmlsq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
+    static_assert_uimm_bits!(LANE, 3);
+    vmlsq_s16(a, b, vdupq_laneq_s16::<LANE>(c))
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul, LANE = 1)
+    assert_instr(mls, LANE = 1)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -28682,21 +30255,21 @@ pub fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
-    static_assert_uimm_bits!(LANE, 1);
-    unsafe { simd_mul(a, vdupq_lane_s32::<LANE>(b)) }
+pub fn vmlsq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
+    static_assert_uimm_bits!(LANE, 3);
+    vmlsq_u16(a, b, vdupq_laneq_u16::<LANE>(c))
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul, LANE = 1)
+    assert_instr(mls, LANE = 1)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -28705,21 +30278,21 @@ pub fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe { simd_mul(a, vdup_lane_u16::<LANE>(b)) }
+pub fn vmls_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
+    static_assert_uimm_bits!(LANE, 1);
+    vmls_s32(a, b, vdup_lane_s32::<LANE>(c))
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul, LANE = 1)
+    assert_instr(mls, LANE = 1)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -28728,21 +30301,21 @@ pub fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe { simd_mul(a, vdupq_lane_u16::<LANE>(b)) }
+pub fn vmls_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
+    static_assert_uimm_bits!(LANE, 1);
+    vmls_u32(a, b, vdup_lane_u32::<LANE>(c))
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul, LANE = 1)
+    assert_instr(mls, LANE = 1)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -28751,21 +30324,21 @@ pub fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
-    static_assert_uimm_bits!(LANE, 1);
-    unsafe { simd_mul(a, vdup_lane_u32::<LANE>(b)) }
+pub fn vmls_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
+    static_assert_uimm_bits!(LANE, 2);
+    vmls_s32(a, b, vdup_laneq_s32::<LANE>(c))
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul, LANE = 1)
+    assert_instr(mls, LANE = 1)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -28774,21 +30347,21 @@ pub fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t {
-    static_assert_uimm_bits!(LANE, 1);
-    unsafe { simd_mul(a, vdupq_lane_u32::<LANE>(b)) }
+pub fn vmls_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t {
+    static_assert_uimm_bits!(LANE, 2);
+    vmls_u32(a, b, vdup_laneq_u32::<LANE>(c))
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul, LANE = 1)
+    assert_instr(mls, LANE = 1)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -28797,21 +30370,21 @@ pub fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t {
-    static_assert_uimm_bits!(LANE, 3);
-    unsafe { simd_mul(a, vdup_laneq_s16::<LANE>(b)) }
+pub fn vmlsq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
+    static_assert_uimm_bits!(LANE, 1);
+    vmlsq_s32(a, b, vdupq_lane_s32::<LANE>(c))
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul, LANE = 1)
+    assert_instr(mls, LANE = 1)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -28820,21 +30393,21 @@ pub fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
-    static_assert_uimm_bits!(LANE, 3);
-    unsafe { simd_mul(a, vdupq_laneq_s16::<LANE>(b)) }
+pub fn vmlsq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t {
+    static_assert_uimm_bits!(LANE, 1);
+    vmlsq_u32(a, b, vdupq_lane_u32::<LANE>(c))
 }
-#[doc = "Multiply"]
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(mls, LANE = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28843,21 +30416,21 @@ pub fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { +pub fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); - unsafe { simd_mul(a, vdup_laneq_s32::(b)) } + vmlsq_s32(a, b, vdupq_laneq_s32::(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(mls, LANE = 1) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28866,21 +30439,20 @@ pub fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { +pub fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 2); - unsafe { simd_mul(a, vdupq_laneq_s32::(b)) } + vmlsq_u32(a, b, vdupq_laneq_u32::(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(fmul) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28889,21 +30461,19 @@ pub fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { simd_mul(a, vdup_laneq_u16::(b)) } +pub fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + vmls_f32(a, b, vdup_n_f32(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(fmul) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28912,21 +30482,19 @@ pub fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { simd_mul(a, vdupq_laneq_u16::(b)) } +pub fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + vmlsq_f32(a, b, vdupq_n_f32(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(mls) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -28935,21 +30503,19 @@ pub fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { simd_mul(a, vdup_laneq_u32::(b)) } +pub fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { + vmls_s16(a, b, vdup_n_s16(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)"] +#[doc = "Vector multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul, LANE = 1) + assert_instr(mls) )] 
-#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -28935,21 +30503,19 @@ pub fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe { simd_mul(a, vdup_laneq_u32::<LANE>(b)) }
+pub fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t {
+    vmls_s16(a, b, vdup_n_s16(c))
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul, LANE = 1)
+    assert_instr(mls)
 )]
-#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -28958,49 +30524,18 @@ pub fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
-    static_assert_uimm_bits!(LANE, 2);
-    unsafe { simd_mul(a, vdupq_laneq_u32::<LANE>(b)) }
-}
-#[doc = "Vector multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f16)"]
-#[inline]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(fmul)
-)]
-#[target_feature(enable = "neon,fp16")]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vmul_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
-    unsafe { simd_mul(a, vdup_n_f16(b)) }
-}
-#[doc = "Vector multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f16)"]
-#[inline]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(fmul)
-)]
-#[target_feature(enable = "neon,fp16")]
-#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vmulq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
-    unsafe { simd_mul(a, vdupq_n_f16(b)) }
+pub fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t {
+    vmlsq_s16(a, b, vdupq_n_s16(c))
 }
-#[doc = "Vector multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(fmul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29010,18 +30545,18 @@ pub fn vmulq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t {
-    unsafe { simd_mul(a, vdup_n_f32(b)) }
+pub fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t {
+    vmls_u16(a, b, vdup_n_u16(c))
 }
-#[doc = "Vector multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(fmul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29031,18 +30566,18 @@ pub fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t {
-    unsafe { simd_mul(a, vdupq_n_f32(b)) }
+pub fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t {
+    vmlsq_u16(a, b, vdupq_n_u16(c))
 }
-#[doc = "Vector multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29052,18 +30587,18 @@ pub fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t {
-    unsafe { simd_mul(a, vdup_n_s16(b)) }
+pub fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t {
+    vmls_s32(a, b, vdup_n_s32(c))
 }
-#[doc = "Vector multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29073,18 +30608,18 @@ pub fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t {
-    unsafe { simd_mul(a, vdupq_n_s16(b)) }
+pub fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t {
+    vmlsq_s32(a, b, vdupq_n_s32(c))
 }
-#[doc = "Vector multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29094,18 +30629,18 @@ pub fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t {
-    unsafe { simd_mul(a, vdup_n_s32(b)) }
+pub fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t {
+    vmls_u32(a, b, vdup_n_u32(c))
 }
-#[doc = "Vector multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)"]
+#[doc = "Vector multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29115,18 +30650,18 @@ pub fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
-    unsafe { simd_mul(a, vdupq_n_s32(b)) }
+pub fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t {
+    vmlsq_u32(a, b, vdupq_n_u32(c))
 }
-#[doc = "Vector multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)"]
+#[doc = "Multiply-subtract from accumulator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29136,18 +30671,18 @@ pub fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t {
-    unsafe { simd_mul(a, vdup_n_u16(b)) }
+pub fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
+    unsafe { simd_sub(a, simd_mul(b, c)) }
 }
-#[doc = "Vector multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)"]
+#[doc = "Multiply-subtract from accumulator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29157,18 +30692,18 @@ pub fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t {
-    unsafe { simd_mul(a, vdupq_n_u16(b)) }
+pub fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
+    unsafe { simd_sub(a, simd_mul(b, c)) }
 }
-#[doc = "Vector multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)"]
+#[doc = "Multiply-subtract from accumulator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29178,18 +30713,18 @@ pub fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t {
-    unsafe { simd_mul(a, vdup_n_u32(b)) }
+pub fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
+    unsafe { simd_sub(a, simd_mul(b, c)) }
 }
-#[doc = "Vector multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)"]
+#[doc = "Multiply-subtract from accumulator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29199,18 +30734,18 @@ pub fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t {
-    unsafe { simd_mul(a, vdupq_n_u32(b)) }
+pub fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
+    unsafe { simd_sub(a, simd_mul(b, c)) }
 }
-#[doc = "Polynomial multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)"]
+#[doc = "Multiply-subtract from accumulator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(pmul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29220,26 +30755,18 @@ pub fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.pmul.v8i8"
-        )]
-        fn _vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t;
-    }
-    unsafe { _vmul_p8(a, b) }
+pub fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
+    unsafe { simd_sub(a, simd_mul(b, c)) }
 }
-#[doc = "Polynomial multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)"]
+#[doc = "Multiply-subtract from accumulator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(pmul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29249,26 +30776,18 @@ pub fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.pmul.v16i8"
-        )]
-        fn _vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t;
-    }
-    unsafe { _vmulq_p8(a, b) }
+pub fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
+    unsafe { simd_sub(a, simd_mul(b, c)) }
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)"]
+#[doc = "Multiply-subtract from accumulator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29278,18 +30797,18 @@ pub fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
-    unsafe { simd_mul(a, b) }
+pub fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
+    unsafe { simd_sub(a, simd_mul(b, c)) }
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)"]
+#[doc = "Multiply-subtract from accumulator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29299,18 +30818,18 @@ pub fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
-    unsafe { simd_mul(a, b) }
+pub fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
+    unsafe { simd_sub(a, simd_mul(b, c)) }
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)"]
+#[doc = "Multiply-subtract from accumulator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29320,18 +30839,18 @@ pub fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
-    unsafe { simd_mul(a, b) }
+pub fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
+    unsafe { simd_sub(a, simd_mul(b, c)) }
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)"]
+#[doc = "Multiply-subtract from accumulator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29341,18 +30860,18 @@ pub fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
-    unsafe { simd_mul(a, b) }
+pub fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
+    unsafe { simd_sub(a, simd_mul(b, c)) }
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)"]
+#[doc = "Multiply-subtract from accumulator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29362,18 +30881,18 @@ pub fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
-    unsafe { simd_mul(a, b) }
+pub fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
+    unsafe { simd_sub(a, simd_mul(b, c)) }
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)"]
+#[doc = "Multiply-subtract from accumulator"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(mls)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29383,19 +30902,20 @@ pub fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
-    unsafe { simd_mul(a, b) }
+pub fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
+    unsafe { simd_sub(a, simd_mul(b, c)) }
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)"]
+#[doc = "Vector widening multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(smlsl, LANE = 1)
 )]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -29404,19 +30924,21 @@ pub fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
-    unsafe { simd_mul(a, b) }
+pub fn vmlsl_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
+    static_assert_uimm_bits!(LANE, 2);
+    vmlsl_s16(a, b, vdup_lane_s16::<LANE>(c))
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)"]
+#[doc = "Vector widening multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(smlsl, LANE = 1)
 )]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -29425,19 +30947,21 @@ pub fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
-    unsafe { simd_mul(a, b) }
+pub fn vmlsl_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
+    static_assert_uimm_bits!(LANE, 3);
+    vmlsl_s16(a, b, vdup_laneq_s16::<LANE>(c))
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)"]
= "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(smlsl, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29446,19 +30970,21 @@ pub fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_mul(a, b) } +pub fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmlsl_s32(a, b, vdup_lane_s32::(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(smlsl, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29467,19 +30993,21 @@ pub fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe { simd_mul(a, b) } +pub fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmlsl_s32(a, b, vdup_laneq_s32::(c)) } -#[doc = "Multiply"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)"] +#[doc = "Vector widening multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mul) + assert_instr(umlsl, LANE = 1) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -29488,19 +31016,21 @@ pub fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_mul(a, b) } +pub fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + 
+    static_assert_uimm_bits!(LANE, 2);
+    vmlsl_u16(a, b, vdup_lane_u16::<LANE>(c))
 }
-#[doc = "Multiply"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)"]
+#[doc = "Vector widening multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(mul)
+    assert_instr(umlsl, LANE = 1)
 )]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -29509,20 +31039,21 @@ pub fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
-    unsafe { simd_mul(a, b) }
+pub fn vmlsl_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t {
+    static_assert_uimm_bits!(LANE, 3);
+    vmlsl_u16(a, b, vdup_laneq_u16::<LANE>(c))
 }
-#[doc = "Vector long multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)"]
+#[doc = "Vector widening multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(smull, LANE = 1)
+    assert_instr(umlsl, LANE = 1)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -29531,21 +31062,21 @@ pub fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmull_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int32x4_t {
-    static_assert_uimm_bits!(LANE, 2);
-    vmull_s16(a, vdup_lane_s16::<LANE>(b))
+pub fn vmlsl_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t {
+    static_assert_uimm_bits!(LANE, 1);
+    vmlsl_u32(a, b, vdup_lane_u32::<LANE>(c))
 }
-#[doc = "Vector long multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)"]
+#[doc = "Vector widening multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(smull, LANE = 1)
+    assert_instr(umlsl, LANE = 1)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(3)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -29554,21 +31085,20 @@ pub fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmull_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
-    static_assert_uimm_bits!(LANE, 3);
-    vmull_s16(a, vdup_laneq_s16::<LANE>(b))
+pub fn vmlsl_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t {
+    static_assert_uimm_bits!(LANE, 2);
+    vmlsl_u32(a, b, vdup_laneq_u32::<LANE>(c))
 }
-#[doc = "Vector long multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)"]
+#[doc = "Vector widening multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(smull, LANE = 1)
+    assert_instr(smlsl)
 )]
-#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -29577,21 +31107,19 @@ pub fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmull_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int64x2_t {
-    static_assert_uimm_bits!(LANE, 1);
-    vmull_s32(a, vdup_lane_s32::<LANE>(b))
+pub fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
+    vmlsl_s16(a, b, vdup_n_s16(c))
 }
-#[doc = "Vector long multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)"]
+#[doc = "Vector widening multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(smull, LANE = 1)
+    assert_instr(smlsl)
 )]
-#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -29600,21 +31128,19 @@ pub fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmull_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    vmull_s32(a, vdup_laneq_s32::<LANE>(b))
+pub fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
+    vmlsl_s32(a, b, vdup_n_s32(c))
 }
-#[doc = "Vector long multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)"]
+#[doc = "Vector widening multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(umull, LANE = 1)
+    assert_instr(umlsl)
 )]
-#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -29623,21 +31149,19 @@ pub fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmull_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
-    static_assert_uimm_bits!(LANE, 2);
-    vmull_u16(a, vdup_lane_u16::<LANE>(b))
+pub fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t {
+    vmlsl_u16(a, b, vdup_n_u16(c))
 }
-#[doc = "Vector long multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)"]
+#[doc = "Vector widening multiply subtract with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(umull, LANE = 1)
+    assert_instr(umlsl)
 )]
-#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -29646,21 +31170,19 @@ pub fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmull_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t {
-    static_assert_uimm_bits!(LANE, 3);
-    vmull_u16(a, vdup_laneq_u16::<LANE>(b))
+pub fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t {
+    vmlsl_u32(a, b, vdup_n_u32(c))
 }
-#[doc = "Vector long multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)"]
+#[doc = "Signed multiply-subtract long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(umull, LANE = 1)
+    assert_instr(smlsl)
 )]
-#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -29669,21 +31191,19 @@ pub fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint32x
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmull_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
-    static_assert_uimm_bits!(LANE, 1);
-    vmull_u32(a, vdup_lane_u32::<LANE>(b))
+pub fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t {
+    unsafe { simd_sub(a, vmull_s8(b, c)) }
 }
-#[doc = "Vector long multiply by scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u32)"]
+#[doc = "Signed multiply-subtract long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(umull, LANE = 1)
+    assert_instr(smlsl)
 )]
-#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -29692,19 +31212,18 @@ pub fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmull_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t {
-    static_assert_uimm_bits!(LANE, 2);
-    vmull_u32(a, vdup_laneq_u32::<LANE>(b))
+pub fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
+    unsafe { simd_sub(a, vmull_s16(b, c)) }
 }
-#[doc = "Vector long multiply with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)"]
+#[doc = "Signed multiply-subtract long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(smull)
+    assert_instr(smlsl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29714,18 +31233,18 @@ pub fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t {
-    vmull_s16(a, vdup_n_s16(b))
+pub fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
+    unsafe { simd_sub(a, vmull_s32(b, c)) }
 }
-#[doc = "Vector long multiply with scalar"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)"]
+#[doc = "Unsigned multiply-subtract long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(smull)
+    assert_instr(umlsl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -29735,18 +31254,18 @@ pub fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t {
-    vmull_s32(a, vdup_n_s32(b))
+pub fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t {
+    unsafe { simd_sub(a, vmull_u8(b, c)) }
 }
-#[doc = "Vector long multiply with scalar"]
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)"] +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) + assert_instr(umlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -29756,18 +31275,18 @@ pub fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { - vmull_u16(a, vdup_n_u16(b)) +pub fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + unsafe { simd_sub(a, vmull_u16(b, c)) } } -#[doc = "Vector long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)"] +#[doc = "Unsigned multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) + assert_instr(umlsl) )] #[cfg_attr( not(target_arch = "arm"), @@ -29777,89 +31296,108 @@ pub fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { - vmull_u32(a, vdup_n_u32(b)) +pub fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + unsafe { simd_sub(a, vmull_u32(b, c)) } } -#[doc = "Polynomial multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)"] +#[doc = "8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_s32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(pmull) + assert_instr(smmla) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { +pub fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.pmull.v8i16" + link_name = "llvm.aarch64.neon.smmla.v4i32.v16i8" )] 
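// Illustration (added commentary, not part of the upstream diff): smmla/ummla
// treat `b` and `c` as 2x8 matrices of 8-bit elements and accumulate their 2x2
// matrix product into the four 32-bit lanes of `a`, i.e. for i, j in 0..2:
//     r[2*i + j] = a[2*i + j] + dot(b[8*i .. 8*i + 8], c[8*j .. 8*j + 8])
// so with all-ones `b` and `c`, every output lane is the matching lane of `a` plus 8.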
- #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i16")] - fn _vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.smmla.v4i32.v16i8")] + fn _vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t; } - unsafe { _vmull_p8(a, b) } + unsafe { _vmmlaq_s32(a, b, c) } } -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)"] +#[doc = "8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) + assert_instr(ummla) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - unsafe { simd_mul(simd_cast(a), simd_cast(b)) } +pub fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ummla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.ummla.v4i32.v16i8")] + fn _vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t; + } + unsafe { _vmmlaq_u32(a, b, c) } } -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)"] +#[doc = "Duplicate element to vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_f16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(dup) )] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vmov_n_f16(a: f16) -> float16x4_t { + vdup_n_f16(a) +} +#[doc = "Duplicate element to vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_f16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(dup) )] -pub fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t 
{ - unsafe { simd_mul(simd_cast(a), simd_cast(b)) } +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vmovq_n_f16(a: f16) -> float16x8_t { + vdupq_n_f16(a) } -#[doc = "Signed multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smull) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -29869,18 +31407,18 @@ pub fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { - unsafe { simd_mul(simd_cast(a), simd_cast(b)) } +pub fn vmov_n_f32(value: f32) -> float32x2_t { + vdup_n_f32(value) } -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -29890,18 +31428,18 @@ pub fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { - unsafe { simd_mul(simd_cast(a), simd_cast(b)) } +pub fn vmov_n_p16(value: p16) -> poly16x4_t { + vdup_n_p16(value) } -#[doc = "Unsigned multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -29911,18 +31449,18 @@ pub fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { - unsafe { simd_mul(simd_cast(a), simd_cast(b)) } +pub fn vmov_n_p8(value: p8) -> poly8x8_t { + vdup_n_p8(value) } -#[doc = "Unsigned 
multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umull) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -29932,18 +31470,18 @@ pub fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { - unsafe { simd_mul(simd_cast(a), simd_cast(b)) } +pub fn vmov_n_s16(value: i16) -> int16x4_t { + vdup_n_s16(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_p8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -29953,19 +31491,18 @@ pub fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { - let b = poly8x8_t::splat(255); - unsafe { simd_xor(a, b) } +pub fn vmov_n_s32(value: i32) -> int32x2_t { + vdup_n_s32(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_s16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(fmov) )] #[cfg_attr( not(target_arch = "arm"), @@ -29975,19 +31512,18 @@ pub fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvn_s16(a: int16x4_t) -> int16x4_t { - let b = int16x4_t::splat(-1); - unsafe { simd_xor(a, b) } +pub fn vmov_n_s64(value: i64) -> int64x1_t { + vdup_n_s64(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_s32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s8)"] #[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -29997,19 +31533,18 @@ pub fn vmvn_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvn_s32(a: int32x2_t) -> int32x2_t { - let b = int32x2_t::splat(-1); - unsafe { simd_xor(a, b) } +pub fn vmov_n_s8(value: i8) -> int8x8_t { + vdup_n_s8(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_s8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -30019,19 +31554,18 @@ pub fn vmvn_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvn_s8(a: int8x8_t) -> int8x8_t { - let b = int8x8_t::splat(-1); - unsafe { simd_xor(a, b) } +pub fn vmov_n_u16(value: u16) -> uint16x4_t { + vdup_n_u16(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_u16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -30041,19 +31575,18 @@ pub fn vmvn_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { - let b = uint16x4_t::splat(65_535); - unsafe { simd_xor(a, b) } +pub fn vmov_n_u32(value: u32) -> uint32x2_t { + vdup_n_u32(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(fmov) )] #[cfg_attr( not(target_arch = "arm"), @@ -30063,19 +31596,18 @@ 
pub fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { - let b = uint32x2_t::splat(4_294_967_295); - unsafe { simd_xor(a, b) } +pub fn vmov_n_u64(value: u64) -> uint64x1_t { + vdup_n_u64(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_u8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -30085,19 +31617,18 @@ pub fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { - let b = uint8x8_t::splat(255); - unsafe { simd_xor(a, b) } +pub fn vmov_n_u8(value: u8) -> uint8x8_t { + vdup_n_u8(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_p8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -30107,19 +31638,18 @@ pub fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { - let b = poly8x16_t::splat(255); - unsafe { simd_xor(a, b) } +pub fn vmovq_n_f32(value: f32) -> float32x4_t { + vdupq_n_f32(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_s16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -30129,19 +31659,18 @@ pub fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvnq_s16(a: int16x8_t) -> int16x8_t { - let b = int16x8_t::splat(-1); - unsafe { simd_xor(a, b) } +pub fn vmovq_n_p16(value: p16) -> poly16x8_t { + vdupq_n_p16(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_s32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -30151,19 +31680,18 @@ pub fn vmvnq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvnq_s32(a: int32x4_t) -> int32x4_t { - let b = int32x4_t::splat(-1); - unsafe { simd_xor(a, b) } +pub fn vmovq_n_p8(value: p8) -> poly8x16_t { + vdupq_n_p8(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_s8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -30173,19 +31701,18 @@ pub fn vmvnq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvnq_s8(a: int8x16_t) -> int8x16_t { - let b = int8x16_t::splat(-1); - unsafe { simd_xor(a, b) } +pub fn vmovq_n_s16(value: i16) -> int16x8_t { + vdupq_n_s16(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_u16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -30195,19 +31722,18 @@ pub fn vmvnq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { - let b = uint16x8_t::splat(65_535); - unsafe { simd_xor(a, b) } +pub fn vmovq_n_s32(value: i32) -> int32x4_t { + vdupq_n_s32(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_u32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -30217,19 +31743,18 @@ pub fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { - let b = uint32x4_t::splat(4_294_967_295); - unsafe { simd_xor(a, b) } +pub fn vmovq_n_s64(value: i64) -> int64x2_t { + vdupq_n_s64(value) } -#[doc = "Vector bitwise not."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_u8)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(mvn) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -30239,63 +31764,60 @@ pub fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { - let b = uint8x16_t::splat(255); - unsafe { simd_xor(a, b) } +pub fn vmovq_n_s8(value: i8) -> int8x16_t { + vdupq_n_s8(value) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f16"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fneg) + assert_instr(dup) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vneg_f16(a: float16x4_t) -> float16x4_t { - unsafe { simd_neg(a) } +pub fn vmovq_n_u16(value: u16) -> uint16x8_t { + vdupq_n_u16(value) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f16)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f16"))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vdup.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fneg) + assert_instr(dup) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vnegq_f16(a: float16x8_t) -> float16x8_t { - unsafe { simd_neg(a) } +pub fn vmovq_n_u32(value: u32) -> uint32x4_t { + vdupq_n_u32(value) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fneg) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -30305,18 +31827,18 @@ pub fn vnegq_f16(a: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vneg_f32(a: float32x2_t) -> float32x2_t { - unsafe { simd_neg(a) } +pub fn vmovq_n_u64(value: u64) -> uint64x2_t { + vdupq_n_u64(value) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)"] +#[doc = "Duplicate vector element to vector or scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fneg) + assert_instr(dup) )] #[cfg_attr( not(target_arch = "arm"), @@ -30326,18 +31848,18 @@ pub fn vneg_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vnegq_f32(a: float32x4_t) -> float32x4_t { - unsafe { simd_neg(a) } +pub fn vmovq_n_u8(value: u8) -> uint8x16_t { + vdupq_n_u8(value) } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)"] +#[doc = "Vector long move."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) + assert_instr(sxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -30347,18 +31869,18 @@ pub fn vnegq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vneg_s8(a: int8x8_t) 
-> int8x8_t { - unsafe { simd_neg(a) } +pub fn vmovl_s16(a: int16x4_t) -> int32x4_t { + unsafe { simd_cast(a) } } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)"] +#[doc = "Vector long move."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) + assert_instr(sxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -30368,18 +31890,18 @@ pub fn vneg_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vnegq_s8(a: int8x16_t) -> int8x16_t { - unsafe { simd_neg(a) } +pub fn vmovl_s32(a: int32x2_t) -> int64x2_t { + unsafe { simd_cast(a) } } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)"] +#[doc = "Vector long move."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) + assert_instr(sxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -30389,18 +31911,18 @@ pub fn vnegq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vneg_s16(a: int16x4_t) -> int16x4_t { - unsafe { simd_neg(a) } +pub fn vmovl_s8(a: int8x8_t) -> int16x8_t { + unsafe { simd_cast(a) } } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)"] +#[doc = "Vector long move."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) + assert_instr(uxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -30410,18 +31932,18 @@ pub fn vneg_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vnegq_s16(a: int16x8_t) -> int16x8_t { - unsafe { simd_neg(a) } +pub fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { + unsafe { simd_cast(a) } } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)"] +#[doc = "Vector long move."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) + assert_instr(uxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -30431,18 +31953,18 @@ pub fn vnegq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vneg_s32(a: int32x2_t) -> int32x2_t { - unsafe { simd_neg(a) } +pub fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { + unsafe { simd_cast(a) } } -#[doc = "Negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)"] +#[doc = "Vector long move."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(neg) + assert_instr(uxtl) )] #[cfg_attr( not(target_arch = "arm"), @@ -30452,18 +31974,18 @@ pub fn vneg_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vnegq_s32(a: int32x4_t) -> int32x4_t { - unsafe { simd_neg(a) } +pub fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { + unsafe { simd_cast(a) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s16)"] +#[doc = "Vector narrow integer."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -30473,19 +31995,18 @@ pub fn vnegq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - let c = int16x4_t::splat(-1); - unsafe { simd_or(simd_xor(b, c), a) } +pub fn vmovn_s16(a: int16x8_t) -> int8x8_t { + unsafe { simd_cast(a) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s32)"] +#[doc = "Vector narrow integer."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -30495,19 +32016,18 @@ pub fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - let c = int32x2_t::splat(-1); - 
unsafe { simd_or(simd_xor(b, c), a) } +pub fn vmovn_s32(a: int32x4_t) -> int16x4_t { + unsafe { simd_cast(a) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s64)"] +#[doc = "Vector narrow integer."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -30517,19 +32037,18 @@ pub fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - let c = int64x1_t::splat(-1); - unsafe { simd_or(simd_xor(b, c), a) } +pub fn vmovn_s64(a: int64x2_t) -> int32x2_t { + unsafe { simd_cast(a) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s8)"] +#[doc = "Vector narrow integer."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -30539,19 +32058,18 @@ pub fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - let c = int8x8_t::splat(-1); - unsafe { simd_or(simd_xor(b, c), a) } +pub fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { + unsafe { simd_cast(a) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s16)"] +#[doc = "Vector narrow integer."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -30561,19 +32079,18 @@ pub fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - let c = int16x8_t::splat(-1); - unsafe { simd_or(simd_xor(b, c), a) } +pub fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { + unsafe { simd_cast(a) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s32)"] +#[doc = "Vector narrow 
integer."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(xtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -30583,63 +32100,62 @@ pub fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - let c = int32x4_t::splat(-1); - unsafe { simd_or(simd_xor(b, c), a) } +pub fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { + unsafe { simd_cast(a) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s64)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(fmul) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - let c = int64x2_t::splat(-1); - unsafe { simd_or(simd_xor(b, c), a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vmul_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe { simd_mul(a, b) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s8)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(fmul) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - let c = int8x16_t::splat(-1); - unsafe { simd_or(simd_xor(b, c), a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vmulq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe { simd_mul(a, b) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -30649,19 +32165,18 @@ pub fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let c = int16x4_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } +pub fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { simd_mul(a, b) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -30671,64 +32186,68 @@ pub fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let c = int32x2_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } +pub fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { simd_mul(a, b) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u64)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - let c = int64x1_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vmul_lane_f16<const LANE: i32>(a: float16x4_t, v: float16x4_t) -> float16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_mul(a, vdup_lane_f16::<LANE>(v)) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u8)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(fmul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let c = int8x8_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vmulq_lane_f16<const LANE: i32>(a: float16x8_t, v: float16x4_t) -> float16x8_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_mul(a, vdupq_lane_f16::<LANE>(v)) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u16)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(fmul, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -30737,20 +32256,21 @@ pub fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - let c = int16x8_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } +pub fn vmul_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { simd_mul(a, vdup_lane_f32::<LANE>(b)) } }
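// Illustration (added commentary, not part of the upstream diff): in the
// *_lane_* intrinsics the lane index is a const generic, range-checked at
// compile time by `static_assert_uimm_bits!` (e.g. 1 bit for a 2-lane vector),
// while #[rustc_legacy_const_generics(2)] keeps the older call style, which
// passes the lane as a trailing argument, working. Both forms are equivalent:
//
//     let r1 = vmul_lane_f32::<1>(a, b); // turbofish form
//     let r2 = vmul_lane_f32(a, b, 1);   // legacy form, rewritten by rustc
 -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u32)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(fmul, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 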
"1.59.0") @@ -30759,20 +32279,21 @@ pub fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - let c = int32x4_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } +pub fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_mul(a, vdup_laneq_f32::(b)) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u64)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(fmul, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -30781,20 +32302,21 @@ pub fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - let c = int64x2_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } +pub fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { simd_mul(a, vdupq_lane_f32::(b)) } } -#[doc = "Vector bitwise inclusive OR NOT"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u8)"] +#[doc = "Floating-point multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orn) + assert_instr(fmul, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -30803,20 +32325,21 @@ pub fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - let c = int8x16_t::splat(-1); - unsafe { simd_or(simd_xor(b, transmute(c)), a) } +pub fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_mul(a, vdupq_laneq_f32::(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -30825,19 +32348,21 @@ pub fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_or(a, b) } +pub fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_mul(a, vdup_lane_s16::(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -30846,19 +32371,21 @@ pub fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe { simd_or(a, b) } +pub fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_mul(a, vdupq_lane_s16::(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -30867,19 +32394,21 @@ pub fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_or(a, b) } +pub fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { simd_mul(a, vdup_lane_s32::(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -30888,19 +32417,21 @@ pub fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_or(a, b) } +pub fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { simd_mul(a, vdupq_lane_s32::(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -30909,19 +32440,21 @@ pub fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { simd_or(a, b) } +pub fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_mul(a, vdup_lane_u16::(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -30930,19 +32463,21 @@ pub fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_or(a, b) } +pub fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_mul(a, vdupq_lane_u16::(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s64)"] +#[doc = 
"Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -30951,19 +32486,21 @@ pub fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe { simd_or(a, b) } +pub fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { simd_mul(a, vdup_lane_u32::(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -30972,19 +32509,21 @@ pub fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe { simd_or(a, b) } +pub fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { simd_mul(a, vdupq_lane_u32::(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -30993,19 +32532,21 @@ pub fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_or(a, b) } +pub fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { simd_mul(a, vdup_laneq_s16::(b)) } } -#[doc = "Vector bitwise or (immediate, 
inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -31014,19 +32555,21 @@ pub fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { simd_or(a, b) } +pub fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { simd_mul(a, vdupq_laneq_s16::(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -31035,19 +32578,21 @@ pub fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_or(a, b) } +pub fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_mul(a, vdup_laneq_s32::(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -31056,19 +32601,21 @@ pub fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_or(a, b) } +pub fn vmulq_laneq_s32(a: int32x4_t, b: 
int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_mul(a, vdupq_laneq_s32::<LANE>(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -31077,19 +32624,21 @@ pub fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_or(a, b) } +pub fn vmul_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { simd_mul(a, vdup_laneq_u16::<LANE>(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -31098,19 +32647,21 @@ pub fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_or(a, b) } +pub fn vmulq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { simd_mul(a, vdupq_laneq_u16::<LANE>(b)) } }
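Also an aside: how callers spell the lane index for these const-generic signatures. The turbofish form passes LANE directly, while #[rustc_legacy_const_generics(2)] lets the index be written as a third call argument so the pre-const-generics calling convention keeps compiling. A minimal sketch, assuming an aarch64 target where NEON is available and the intrinsics are safe fn (older toolchains need an unsafe block):

#[cfg(target_arch = "aarch64")]
fn lane_mul_demo() -> core::arch::aarch64::int16x4_t {
    use core::arch::aarch64::{vdup_n_s16, vmul_lane_s16};
    let a = vdup_n_s16(7); // [7, 7, 7, 7]
    let b = vdup_n_s16(3); // [3, 3, 3, 3]
    // Multiply every element of `a` by lane 1 of `b`; LANE is checked at compile time.
    vmul_lane_s16::<1>(a, b)
}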
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe { simd_or(a, b) } +pub fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_mul(a, vdup_laneq_u32::(b)) } } -#[doc = "Vector bitwise or (immediate, inclusive)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(orr) + assert_instr(mul, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -31140,48 +32693,49 @@ pub fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe { simd_or(a, b) } +pub fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_mul(a, vdupq_laneq_u32::(b)) } } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s8)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + assert_instr(fmul) )] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vmul_n_f16(a: float16x4_t, b: f16) -> float16x4_t { + unsafe { simd_mul(a, vdup_n_f16(b)) } +} +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fmul) )] -pub fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { - let x: int16x4_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_s8(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - unsafe { - x = simd_add(vpaddl_s8(b), a); - }; - x +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vmulq_n_f16(a: 
float16x8_t, b: f16) -> float16x8_t { + unsafe { simd_mul(a, vdupq_n_f16(b)) } } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s8)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31191,27 +32745,18 @@ pub fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { - let x: int16x8_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_s8(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - unsafe { - x = simd_add(vpaddlq_s8(b), a); - }; - x +pub fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { + unsafe { simd_mul(a, vdup_n_f32(b)) } } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) + assert_instr(fmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31221,27 +32766,18 @@ pub fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { - let x: int32x2_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_s16(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - unsafe { - x = simd_add(vpaddl_s16(b), a); - }; - x +pub fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { + unsafe { simd_mul(a, vdupq_n_f32(b)) } } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31251,27 +32787,18 @@ pub fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { - let x: int32x4_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_s16(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - unsafe { - x = simd_add(vpaddlq_s16(b), a); - }; - x +pub fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { + unsafe { simd_mul(a, vdup_n_s16(b)) } } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s32)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31281,27 +32808,18 @@ pub fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { - let x: int64x1_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_s32(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - unsafe { - x = simd_add(vpaddl_s32(b), a); - }; - x +pub fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { + unsafe { simd_mul(a, vdupq_n_s16(b)) } } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s32)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sadalp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31311,27 +32829,18 @@ pub fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { - let x: int64x2_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_s32(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - unsafe { - x = simd_add(vpaddlq_s32(b), a); - }; - x +pub fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { + unsafe { simd_mul(a, vdup_n_s32(b)) } } -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u8)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = 
"arm64ec")), - assert_instr(uadalp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31341,27 +32850,18 @@ pub fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { - let x: uint16x4_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_u8(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - unsafe { - x = simd_add(vpaddl_u8(b), a); - }; - x +pub fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { + unsafe { simd_mul(a, vdupq_n_s32(b)) } } -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u8)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31371,27 +32871,18 @@ pub fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { - let x: uint16x8_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_u8(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - unsafe { - x = simd_add(vpaddlq_u8(b), a); - }; - x +pub fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { + unsafe { simd_mul(a, vdup_n_u16(b)) } } -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31401,27 +32892,18 @@ pub fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { - let x: uint32x2_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_u16(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - unsafe { - x = simd_add(vpaddl_u16(b), a); - }; - x +pub fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { + unsafe { simd_mul(a, vdupq_n_u16(b)) } } -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u16)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31431,27 +32913,18 @@ pub fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { - let x: uint32x4_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_u16(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - unsafe { - x = simd_add(vpaddlq_u16(b), a); - }; - x +pub fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { + unsafe { simd_mul(a, vdup_n_u32(b)) } } -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u32)"] +#[doc = "Vector multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31461,27 +32934,18 @@ pub fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { - let x: uint64x1_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadal_u32(a, b); - } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - unsafe { - x = simd_add(vpaddl_u32(b), a); - }; - x +pub fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { + unsafe { simd_mul(a, vdupq_n_u32(b)) } } -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u32)"] +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uadalp) + assert_instr(pmul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31491,57 +32955,55 @@ pub fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { - let x: uint64x2_t; - #[cfg(target_arch = "arm")] - { - x = priv_vpadalq_u32(a, b); +pub fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = 
"llvm.aarch64.neon.pmul.v8i8" + )] + fn _vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; } - #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] - unsafe { - x = simd_add(vpaddlq_u32(b), a); - }; - x + unsafe { _vmul_p8(a, b) } } -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f16)"] +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(faddp) + assert_instr(pmul) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vpadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { +pub fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4f16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v4f16" + link_name = "llvm.aarch64.neon.pmul.v16i8" )] - fn _vpadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + fn _vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; } - unsafe { _vpadd_f16(a, b) } + unsafe { _vmulq_p8(a, b) } } -#[doc = "Floating-point add pairwise"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(faddp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31551,26 +33013,18 @@ pub fn vpadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.faddp.v2f32" - )] - fn _vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - unsafe { _vpadd_f32(a, b) } +pub fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_mul(a, b) } } -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s8)"] +#[doc = "Multiply"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31580,26 +33034,18 @@ pub fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v8i8")] - fn _vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - unsafe { _vpadd_s8(a, b) } +pub fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_mul(a, b) } } -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31609,26 +33055,18 @@ pub fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4i16")] - fn _vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - unsafe { _vpadd_s16(a, b) } +pub fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_mul(a, b) } } -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31638,26 +33076,18 @@ pub fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.addp.v2i32" - )] - 
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2i32")] - fn _vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - unsafe { _vpadd_s32(a, b) } +pub fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_mul(a, b) } } -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31667,18 +33097,18 @@ pub fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { transmute(vpadd_s8(transmute(a), transmute(b))) } +pub fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_mul(a, b) } } -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31688,18 +33118,18 @@ pub fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { transmute(vpadd_s16(transmute(a), transmute(b))) } +pub fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { simd_mul(a, b) } } -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31709,18 +33139,18 @@ pub fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { transmute(vpadd_s32(transmute(a), transmute(b))) } +pub fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_mul(a, b) } } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s8)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31730,26 +33160,18 @@ pub fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpaddl_s8(a: int8x8_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v4i16.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i16.v8i8")] - fn _vpaddl_s8(a: int8x8_t) -> int16x4_t; - } - unsafe { _vpaddl_s8(a) } +pub fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_mul(a, b) } } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s8)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31759,26 +33181,18 @@ pub fn vpaddl_s8(a: int8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v8i16.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v8i16.v16i8")] - fn _vpaddlq_s8(a: int8x16_t) -> int16x8_t; - } - unsafe { _vpaddlq_s8(a) } +pub fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_mul(a, b) } } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31788,26 +33202,18 @@ pub fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpaddl_s16(a: int16x4_t) -> int32x2_t { - unsafe extern 
"unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v2i32.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i32.v4i16")] - fn _vpaddl_s16(a: int16x4_t) -> int32x2_t; - } - unsafe { _vpaddl_s16(a) } +pub fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { simd_mul(a, b) } } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s16)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31817,26 +33223,18 @@ pub fn vpaddl_s16(a: int16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v4i32.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i32.v8i16")] - fn _vpaddlq_s16(a: int16x8_t) -> int32x4_t; - } - unsafe { _vpaddlq_s16(a) } +pub fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_mul(a, b) } } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s32)"] +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) + assert_instr(mul) )] #[cfg_attr( not(target_arch = "arm"), @@ -31846,27 +33244,20 @@ pub fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpaddl_s32(a: int32x2_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v1i64.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v1i64.v2i32")] - fn _vpaddl_s32(a: int32x2_t) -> int64x1_t; - } - unsafe { _vpaddl_s32(a) } +pub fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_mul(a, b) } } -#[doc = "Signed Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s32)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vpaddl.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(saddlp) + assert_instr(smull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -31875,27 +33266,21 @@ pub fn vpaddl_s32(a: int32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.saddlp.v2i64.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i64.v4i32")] - fn _vpaddlq_s32(a: int32x4_t) -> int64x2_t; - } - unsafe { _vpaddlq_s32(a) } +pub fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmull_s16(a, vdup_lane_s16::(b)) } -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u8)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) + assert_instr(smull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -31904,27 +33289,21 @@ pub fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v4i16.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i16.v8i8")] - fn _vpaddl_u8(a: uint8x8_t) -> uint16x4_t; - } - unsafe { _vpaddl_u8(a) } +pub fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmull_s16(a, vdup_laneq_s16::(b)) } -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u8)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) + assert_instr(smull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -31933,27 +33312,21 @@ pub fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v8i16.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v8i16.v16i8")] - fn _vpaddlq_u8(a: uint8x16_t) -> uint16x8_t; - } - unsafe { _vpaddlq_u8(a) } +pub fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmull_s32(a, vdup_lane_s32::(b)) } -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u16)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) + assert_instr(smull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -31962,27 +33335,21 @@ pub fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v2i32.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i32.v4i16")] - fn _vpaddl_u16(a: uint16x4_t) -> uint32x2_t; - } - unsafe { _vpaddl_u16(a) } +pub fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmull_s32(a, vdup_laneq_s32::(b)) } -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u16)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) + assert_instr(umull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -31991,27 +33358,21 @@ pub fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v4i32.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i32.v8i16")] - fn _vpaddlq_u16(a: uint16x8_t) -> uint32x4_t; - } - unsafe { _vpaddlq_u16(a) } +pub fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> 
uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + vmull_u16(a, vdup_lane_u16::<LANE>(b)) } -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u32)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) + assert_instr(umull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -32020,27 +33381,21 @@ pub fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v1i64.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v1i64.v2i32")] - fn _vpaddl_u32(a: uint32x2_t) -> uint64x1_t; - } - unsafe { _vpaddl_u32(a) } +pub fn vmull_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 3); + vmull_u16(a, vdup_laneq_u16::<LANE>(b)) } -#[doc = "Unsigned Add and Accumulate Long Pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u32)"] +#[doc = "Vector long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uaddlp) + assert_instr(umull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -32049,27 +33404,21 @@ pub fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uaddlp.v2i64.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i64.v4i32")] - fn _vpaddlq_u32(a: uint32x4_t) -> uint64x2_t; - } - unsafe { _vpaddlq_u32(a) } +pub fn vmull_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + vmull_u32(a, vdup_lane_u32::<LANE>(b)) }
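One more aside on the vmull_lane_* family just above: the products are produced at twice the element width, so no wrapping is possible. A scalar model under the same assumptions as before (hypothetical helper, uint16x4_t modeled as [u16; 4]):

fn mull_lane_u16_model<const LANE: usize>(a: [u16; 4], b: [u16; 4]) -> [u32; 4] {
    const { assert!(LANE < 4) }; // mirrors static_assert_uimm_bits!(LANE, 2)
    let s = b[LANE] as u32; // vdup_lane_u16::<LANE>(b), then widen
    // u16::MAX * u16::MAX = 0xFFFE0001 fits in u32, so plain `*` cannot overflow.
    [a[0] as u32 * s, a[1] as u32 * s, a[2] as u32 * s, a[3] as u32 * s]
}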
"arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fmaxp) + assert_instr(umull, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -32078,26 +33427,19 @@ pub fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.fmaxp.v2f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2f32")] - fn _vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - unsafe { _vpmax_f32(a, b) } +pub fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 2); + vmull_u32(a, vdup_laneq_u32::(b)) } -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s8)"] +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smaxp) + assert_instr(smull) )] #[cfg_attr( not(target_arch = "arm"), @@ -32107,26 +33449,18 @@ pub fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v8i8")] - fn _vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - unsafe { _vpmax_s8(a, b) } +pub fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { + vmull_s16(a, vdup_n_s16(b)) } -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s16)"] +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smaxp) + assert_instr(smull) )] #[cfg_attr( not(target_arch = "arm"), @@ -32136,26 +33470,18 @@ pub fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", 
target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v4i16")] - fn _vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - unsafe { _vpmax_s16(a, b) } +pub fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { + vmull_s32(a, vdup_n_s32(b)) } -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s32)"] +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(smaxp) + assert_instr(umull) )] #[cfg_attr( not(target_arch = "arm"), @@ -32165,26 +33491,18 @@ pub fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.smaxp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2i32")] - fn _vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - unsafe { _vpmax_s32(a, b) } +pub fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { + vmull_u16(a, vdup_n_u16(b)) } -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u8)"] +#[doc = "Vector long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umaxp) + assert_instr(umull) )] #[cfg_attr( not(target_arch = "arm"), @@ -32194,26 +33512,18 @@ pub fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v8i8")] - fn _vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - } - unsafe { _vpmax_u8(a, b) } +pub fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { + vmull_u32(a, vdup_n_u32(b)) } -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u16)"] +#[doc = "Polynomial multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vpmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umaxp) + assert_instr(pmull) )] #[cfg_attr( not(target_arch = "arm"), @@ -32223,26 +33533,26 @@ pub fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { +pub fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v4i16" + link_name = "llvm.aarch64.neon.pmull.v8i16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v4i16")] - fn _vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i16")] + fn _vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t; } - unsafe { _vpmax_u16(a, b) } + unsafe { _vmull_p8(a, b) } } -#[doc = "Folding maximum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u32)"] +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(umaxp) + assert_instr(smull) )] #[cfg_attr( not(target_arch = "arm"), @@ -32252,26 +33562,18 @@ pub fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.umaxp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v2i32")] - fn _vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - } - unsafe { _vpmax_u32(a, b) } +pub fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe { simd_mul(simd_cast(a), simd_cast(b)) } } -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f32)"] +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fminp) + assert_instr(smull) )] #[cfg_attr( not(target_arch = "arm"), @@ -32281,26 +33583,18 @@ pub fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - 
link_name = "llvm.aarch64.neon.fminp.v2f32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2f32")] - fn _vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - } - unsafe { _vpmin_f32(a, b) } +pub fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe { simd_mul(simd_cast(a), simd_cast(b)) } } -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s8)"] +#[doc = "Signed multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sminp) + assert_instr(smull) )] #[cfg_attr( not(target_arch = "arm"), @@ -32310,26 +33604,18 @@ pub fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v8i8")] - fn _vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - unsafe { _vpmin_s8(a, b) } +pub fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { + unsafe { simd_mul(simd_cast(a), simd_cast(b)) } } -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s16)"] +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sminp) + assert_instr(umull) )] #[cfg_attr( not(target_arch = "arm"), @@ -32339,26 +33625,18 @@ pub fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v4i16")] - fn _vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - unsafe { _vpmin_s16(a, b) } +pub fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + unsafe { simd_mul(simd_cast(a), simd_cast(b)) } } -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s32)"] +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sminp) + assert_instr(umull) )] #[cfg_attr( not(target_arch = "arm"), @@ -32368,26 +33646,18 @@ pub fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sminp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2i32")] - fn _vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - unsafe { _vpmin_s32(a, b) } +pub fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe { simd_mul(simd_cast(a), simd_cast(b)) } } -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u8)"] +#[doc = "Unsigned multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uminp) + assert_instr(umull) )] #[cfg_attr( not(target_arch = "arm"), @@ -32397,26 +33667,18 @@ pub fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v8i8")] - fn _vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - } - unsafe { _vpmin_u8(a, b) } +pub fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe { simd_mul(simd_cast(a), simd_cast(b)) } } -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u16)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uminp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32426,26 +33688,19 @@ pub fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v4i16")] - fn _vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> 
uint16x4_t; - } - unsafe { _vpmin_u16(a, b) } +pub fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { + let b = poly8x8_t::splat(255); + unsafe { simd_xor(a, b) } } -#[doc = "Folding minimum of adjacent pairs"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u32)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uminp) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32455,26 +33710,19 @@ pub fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uminp.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v2i32")] - fn _vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - } - unsafe { _vpmin_u32(a, b) } +pub fn vmvn_s16(a: int16x4_t) -> int16x4_t { + let b = int16x4_t::splat(-1); + unsafe { simd_xor(a, b) } } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32484,26 +33732,19 @@ pub fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqabs_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i8")] - fn _vqabs_s8(a: int8x8_t) -> int8x8_t; - } - unsafe { _vqabs_s8(a) } +pub fn vmvn_s32(a: int32x2_t) -> int32x2_t { + let b = int32x2_t::splat(-1); + unsafe { simd_xor(a, b) } } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) + assert_instr(mvn) )] #[cfg_attr( 
not(target_arch = "arm"), @@ -32513,26 +33754,19 @@ pub fn vqabs_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqabsq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v16i8")] - fn _vqabsq_s8(a: int8x16_t) -> int8x16_t; - } - unsafe { _vqabsq_s8(a) } +pub fn vmvn_s8(a: int8x8_t) -> int8x8_t { + let b = int8x8_t::splat(-1); + unsafe { simd_xor(a, b) } } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32542,26 +33776,19 @@ pub fn vqabsq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqabs_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i16")] - fn _vqabs_s16(a: int16x4_t) -> int16x4_t; - } - unsafe { _vqabs_s16(a) } +pub fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { + let b = uint16x4_t::splat(65_535); + unsafe { simd_xor(a, b) } } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32571,26 +33798,19 @@ pub fn vqabs_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqabsq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i16")] - fn _vqabsq_s16(a: int16x8_t) -> int16x8_t; - } - unsafe { _vqabsq_s16(a) } +pub fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { + let b = uint32x2_t::splat(4_294_967_295); + unsafe { simd_xor(a, b) } } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32600,26 +33820,19 @@ pub fn vqabsq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqabs_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v2i32")] - fn _vqabs_s32(a: int32x2_t) -> int32x2_t; - } - unsafe { _vqabs_s32(a) } +pub fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { + let b = uint8x8_t::splat(255); + unsafe { simd_xor(a, b) } } -#[doc = "Signed saturating Absolute value"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqabs) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32629,26 +33842,19 @@ pub fn vqabs_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqabsq_s32(a: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqabs.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i32")] - fn _vqabsq_s32(a: int32x4_t) -> int32x4_t; - } - unsafe { _vqabsq_s32(a) } +pub fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { + let b = poly8x16_t::splat(255); + unsafe { simd_xor(a, b) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32658,18 +33864,19 @@ pub fn vqabsq_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_saturating_add(a, b) } +pub fn vmvnq_s16(a: int16x8_t) -> int16x8_t { + let b = int16x8_t::splat(-1); + unsafe { simd_xor(a, b) } } -#[doc = "Saturating add"] 
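Review note: the vmvn_*/vmvnq_* bodies added in these hunks all lower bitwise NOT to simd_xor against an all-ones splat; the unsigned variants spell the mask as 255/65_535/4_294_967_295, which is the same bit pattern as the signed splat(-1). A scalar sketch of the identity these bodies rely on (illustration only, not part of the patch):

    fn main() {
        for x in [0u8, 1, 0x5a, 0xff] {
            assert_eq!(x ^ 0xff, !x); // unsigned lanes: XOR with the type's MAX is NOT
        }
        for x in [0i16, 1, -42, i16::MAX] {
            assert_eq!(x ^ -1, !x); // signed lanes: -1 is all-ones in two's complement
        }
    }
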
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32679,18 +33886,19 @@ pub fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe { simd_saturating_add(a, b) } +pub fn vmvnq_s32(a: int32x4_t) -> int32x4_t { + let b = int32x4_t::splat(-1); + unsafe { simd_xor(a, b) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32700,18 +33908,19 @@ pub fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_saturating_add(a, b) } +pub fn vmvnq_s8(a: int8x16_t) -> int8x16_t { + let b = int8x16_t::splat(-1); + unsafe { simd_xor(a, b) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32721,18 +33930,19 @@ pub fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_saturating_add(a, b) } +pub fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { + let b = uint16x8_t::splat(65_535); + unsafe { simd_xor(a, b) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_u32)"] #[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32742,18 +33952,19 @@ pub fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { simd_saturating_add(a, b) } +pub fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { + let b = uint32x4_t::splat(4_294_967_295); + unsafe { simd_xor(a, b) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)"] +#[doc = "Vector bitwise not."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(mvn) )] #[cfg_attr( not(target_arch = "arm"), @@ -32763,60 +33974,63 @@ pub fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_saturating_add(a, b) } +pub fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { + let b = uint8x16_t::splat(255); + unsafe { simd_xor(a, b) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s64)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(fneg) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe { simd_saturating_add(a, b) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vneg_f16(a: float16x4_t) -> float16x4_t { + unsafe { simd_neg(a) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] 
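Review note: the vneg_f16/vnegq_f16 hunk just below gates on target_feature(enable = "neon,fp16"), raises the ARM baseline from v7 to v8, and is compiled out for arm64ec. Callers would normally feature-detect before reaching such a gated wrapper; a usage sketch under those assumptions (have_fp16 is an illustrative helper, not part of this patch):

    #[cfg(target_arch = "aarch64")]
    fn have_fp16() -> bool {
        // Runtime check for the fp16 target feature on aarch64.
        std::arch::is_aarch64_feature_detected!("fp16")
    }

    #[cfg(not(target_arch = "aarch64"))]
    fn have_fp16() -> bool {
        false
    }

    fn main() {
        println!("fp16 NEON available: {}", have_fp16());
    }
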
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqadd) + assert_instr(fneg) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe { simd_saturating_add(a, b) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vnegq_f16(a: float16x8_t) -> float16x8_t { + unsafe { simd_neg(a) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(fneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -32826,18 +34040,18 @@ pub fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_saturating_add(a, b) } +pub fn vneg_f32(a: float32x2_t) -> float32x2_t { + unsafe { simd_neg(a) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(fneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -32847,18 +34061,18 @@ pub fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { simd_saturating_add(a, b) } +pub fn vnegq_f32(a: float32x4_t) -> float32x4_t { + unsafe { simd_neg(a) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(neg) )] #[cfg_attr( not(target_arch = "arm"), @@ -32868,18 
+34082,18 @@ pub fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_saturating_add(a, b) } +pub fn vneg_s8(a: int8x8_t) -> int8x8_t { + unsafe { simd_neg(a) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(neg) )] #[cfg_attr( not(target_arch = "arm"), @@ -32889,18 +34103,18 @@ pub fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_saturating_add(a, b) } +pub fn vnegq_s8(a: int8x16_t) -> int8x16_t { + unsafe { simd_neg(a) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(neg) )] #[cfg_attr( not(target_arch = "arm"), @@ -32910,18 +34124,18 @@ pub fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_saturating_add(a, b) } +pub fn vneg_s16(a: int16x4_t) -> int16x4_t { + unsafe { simd_neg(a) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(neg) )] #[cfg_attr( not(target_arch = "arm"), @@ -32931,18 +34145,18 @@ pub fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_saturating_add(a, b) } +pub fn vnegq_s16(a: int16x8_t) -> int16x8_t { + unsafe { simd_neg(a) } } -#[doc = "Saturating add"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u64)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(neg) )] #[cfg_attr( not(target_arch = "arm"), @@ -32952,18 +34166,18 @@ pub fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe { simd_saturating_add(a, b) } +pub fn vneg_s32(a: int32x2_t) -> int32x2_t { + unsafe { simd_neg(a) } } -#[doc = "Saturating add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqadd) + assert_instr(neg) )] #[cfg_attr( not(target_arch = "arm"), @@ -32973,24 +34187,19 @@ pub fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe { simd_saturating_add(a, b) } +pub fn vnegq_s32(a: int32x4_t) -> int32x4_t { + unsafe { simd_neg(a) } } -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(sqdmlal, N = 2) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -32999,25 +34208,20 @@ pub fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmlal_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - vqaddq_s32(a, vqdmull_lane_s16::<N>(b, c)) +pub fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + let c = int16x4_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } } -#[doc = "Vector widening saturating doubling multiply accumulate 
with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(sqdmlal, N = 1) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -33026,19 +34230,19 @@ pub fn vqdmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - vqaddq_s64(a, vqdmull_lane_s32::(b, c)) +pub fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + let c = int32x2_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } } -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -33048,18 +34252,19 @@ pub fn vqdmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vqaddq_s32(a, vqdmull_n_s16(b, c)) +pub fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + let c = int64x1_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } } -#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -33069,18 +34274,19 @@ pub fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmlal_n_s32(a: int64x2_t, b: 
int32x2_t, c: i32) -> int64x2_t { - vqaddq_s64(a, vqdmull_n_s32(b, c)) +pub fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + let c = int8x8_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s16)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -33090,18 +34296,19 @@ pub fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - vqaddq_s32(a, vqdmull_s16(b, c)) +pub fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + let c = int16x8_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } } -#[doc = "Signed saturating doubling multiply-add long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlal) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -33111,24 +34318,20 @@ pub fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - vqaddq_s64(a, vqdmull_s32(b, c)) +pub fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + let c = int32x4_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(sqdmlsl, N = 2) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -33137,25 +34340,20 @@ pub fn 
vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmlsl_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - vqsubq_s32(a, vqdmull_lane_s16::<N>(b, c)) +pub fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + let c = int64x2_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(sqdmlsl, N = 1) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(orn) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -33164,19 +34362,19 @@ pub fn vqdmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmlsl_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - vqsubq_s64(a, vqdmull_lane_s32::<N>(b, c)) +pub fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + let c = int8x16_t::splat(-1); + unsafe { simd_or(simd_xor(b, c), a) } } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -33186,18 +34384,19 @@ pub fn vqdmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { - vqsubq_s32(a, vqdmull_n_s16(b, c)) +pub fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + let c = int16x4_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } } -#[doc = "Vector widening saturating doubling multiply subtract with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -33207,18 +34406,19 @@ pub fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { - vqsubq_s64(a, vqdmull_n_s32(b, c)) +pub fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + let c = int32x2_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -33228,18 +34428,19 @@ pub fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { - vqsubq_s32(a, vqdmull_s16(b, c)) +pub fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + let c = int64x1_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } } -#[doc = "Signed saturating doubling multiply-subtract long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmlsl) + assert_instr(orn) )] #[cfg_attr( not(target_arch = "arm"), @@ -33249,20 +34450,20 @@ pub fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { - vqsubq_s64(a, vqdmull_s32(b, c)) +pub fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + let c = int8x8_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) + assert_instr(orn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -33271,21 +34472,20 @@ pub fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - vqdmulh_s16(a, vdup_n_s16(vgetq_lane_s16::<LANE>(b))) +pub fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + let c = int16x8_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) + assert_instr(orn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -33294,21 +34494,20 @@ pub fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - vqdmulhq_s16(a, vdupq_n_s16(vgetq_lane_s16::<LANE>(b))) +pub fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + let c = int32x4_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) + assert_instr(orn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -33317,21 +34516,20 @@ pub fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - vqdmulh_s32(a, vdup_n_s32(vgetq_lane_s32::<LANE>(b))) +pub fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + let c = 
int64x2_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } } -#[doc = "Vector saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)"] +#[doc = "Vector bitwise inclusive OR NOT"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh, LANE = 0) + assert_instr(orn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -33340,19 +34538,19 @@ pub fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - vqdmulhq_s32(a, vdupq_n_s32(vgetq_lane_s32::<LANE>(b))) +pub fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + let c = int8x16_t::splat(-1); + unsafe { simd_or(simd_xor(b, transmute(c)), a) } } -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33362,19 +34560,18 @@ pub fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { - let b: int16x4_t = vdup_n_s16(b); - vqdmulh_s16(a, b) +pub fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_or(a, b) } } -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33384,19 +34581,18 @@ pub fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { - let b: 
int16x8_t = vdupq_n_s16(b); - vqdmulhq_s16(a, b) +pub fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { simd_or(a, b) } } -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33406,19 +34602,18 @@ pub fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { - let b: int32x2_t = vdup_n_s32(b); - vqdmulh_s32(a, b) +pub fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_or(a, b) } } -#[doc = "Vector saturating doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33428,19 +34623,18 @@ pub fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { - let b: int32x4_t = vdupq_n_s32(b); - vqdmulhq_s32(a, b) +pub fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_or(a, b) } } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33450,26 +34644,18 @@ pub fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")] - #[cfg_attr( - any(target_arch = 
"aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v4i16" - )] - fn _vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - unsafe { _vqdmulh_s16(a, b) } +pub fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_or(a, b) } } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33479,26 +34665,18 @@ pub fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v8i16" - )] - fn _vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - } - unsafe { _vqdmulhq_s16(a, b) } +pub fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { simd_or(a, b) } } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33508,26 +34686,18 @@ pub fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v2i32" - )] - fn _vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - unsafe { _vqdmulh_s32(a, b) } +pub fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { simd_or(a, b) } } -#[doc = "Signed saturating doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmulh) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33537,28 +34707,19 @@ pub fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmulh.v4i32" - )] - fn _vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - } - unsafe { _vqdmulhq_s32(a, b) } +pub fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { simd_or(a, b) } } -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull, N = 2) + assert_instr(orr) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -33567,22 +34728,19 @@ pub fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmull_lane_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int32x4_t { - static_assert_uimm_bits!(N, 2); - let b = vdup_lane_s16::<N>(b); - vqdmull_s16(a, b) +pub fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_or(a, b) } } -#[doc = "Vector saturating doubling long multiply by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull, N = 1) + assert_instr(orr) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -33591,20 +34749,18 @@ pub fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmull_lane_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int64x2_t { - static_assert_uimm_bits!(N, 1); - let b = vdup_lane_s32::<N>(b); - vqdmull_s32(a, b) +pub fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_or(a, b) } } 
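All of the new vorn*/vorr* bodies above follow one scheme: instead of declaring per-width llvm.* link names, the intrinsics are open-coded on the portable simd_or/simd_xor primitives, and the assert_instr attributes check that LLVM still folds the pattern back into a single vorn/orn or vorr/orr. A minimal sketch of the OR-NOT lowering, written against the same internal helpers this file already uses (the d-register variant vorn_s8 shown here is assumed by analogy with the vornq_s8 body above):
pub fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Splatting -1 yields an all-ones vector, so XOR with it is a bitwise NOT of `b`.
    let all_ones = int8x8_t::splat(-1);
    // `a | !b`: LLVM recognises or(xor(b, -1), a) and emits VORN (Arm) / ORN (AArch64).
    unsafe { simd_or(simd_xor(b, all_ones), a) }
}
The unsigned variants differ only in a transmute of the all-ones vector, as the vorn_u16/vorn_u32 bodies above show.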
-#[doc = "Vector saturating doubling long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33614,18 +34770,18 @@ pub fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { - vqdmull_s16(a, vdup_n_s16(b)) +pub fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_or(a, b) } } -#[doc = "Vector saturating doubling long multiply with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33635,18 +34791,18 @@ pub fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { - vqdmull_s32(a, vdup_n_s32(b)) +pub fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_or(a, b) } } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33656,26 +34812,18 @@ pub fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmull.v4i32" - )] - fn _vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - } - unsafe { _vqdmull_s16(a, b) } +pub fn 
vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_or(a, b) } } -#[doc = "Signed saturating doubling multiply long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqdmull) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33685,26 +34833,18 @@ pub fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqdmull.v2i64" - )] - fn _vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - } - unsafe { _vqdmull_s32(a, b) } +pub fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_or(a, b) } } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtn) + assert_instr(orr) )] #[cfg_attr( not(target_arch = "arm"), @@ -33714,26 +34854,18 @@ pub fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqmovn_s16(a: int16x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtn.v8i8" - )] - fn _vqmovn_s16(a: int16x8_t) -> int8x8_t; - } - unsafe { _vqmovn_s16(a) } +pub fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { simd_or(a, b) } } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)"] +#[doc = "Vector bitwise or (immediate, inclusive)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtn) + assert_instr(orr) )] #[cfg_attr( not(target_arch 
= "arm"), @@ -33743,26 +34875,18 @@ pub fn vqmovn_s16(a: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqmovn_s32(a: int32x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtn.v4i16" - )] - fn _vqmovn_s32(a: int32x4_t) -> int16x4_t; - } - unsafe { _vqmovn_s32(a) } +pub fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { simd_or(a, b) } } -#[doc = "Signed saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtn) + assert_instr(sadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -33772,26 +34896,27 @@ pub fn vqmovn_s32(a: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqmovn_s64(a: int64x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtn.v2i32" - )] - fn _vqmovn_s64(a: int64x2_t) -> int32x2_t; +pub fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { + let x: int16x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_s8(a, b); } - unsafe { _vqmovn_s64(a) } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + unsafe { + x = simd_add(vpaddl_s8(b), a); + }; + x } -#[doc = "Unsigned saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqxtn) + assert_instr(sadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -33801,26 +34926,27 @@ pub fn vqmovn_s64(a: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqxtn.v8i8" - )] - fn _vqmovn_u16(a: uint16x8_t) -> uint8x8_t; +pub fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { + let x: int16x8_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_s8(a, b); } - unsafe { _vqmovn_u16(a) } + 
#[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + unsafe { + x = simd_add(vpaddlq_s8(b), a); + }; + x } -#[doc = "Unsigned saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqxtn) + assert_instr(sadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -33830,26 +34956,27 @@ pub fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqxtn.v4i16" - )] - fn _vqmovn_u32(a: uint32x4_t) -> uint16x4_t; +pub fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { + let x: int32x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_s16(a, b); } - unsafe { _vqmovn_u32(a) } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + unsafe { + x = simd_add(vpaddl_s16(b), a); + }; + x } -#[doc = "Unsigned saturating extract narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqxtn) + assert_instr(sadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -33859,26 +34986,27 @@ pub fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqxtn.v2i32" - )] - fn _vqmovn_u64(a: uint64x2_t) -> uint32x2_t; +pub fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { + let x: int32x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_s16(a, b); } - unsafe { _vqmovn_u64(a) } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + unsafe { + x = simd_add(vpaddlq_s16(b), a); + }; + x } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s32)"] #[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtun) + assert_instr(sadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -33888,26 +35016,27 @@ pub fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtun.v8i8" - )] - fn _vqmovun_s16(a: int16x8_t) -> uint8x8_t; +pub fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { + let x: int64x1_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_s32(a, b); } - unsafe { _vqmovun_s16(a) } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + unsafe { + x = simd_add(vpaddl_s32(b), a); + }; + x } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtun) + assert_instr(sadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -33917,26 +35046,27 @@ pub fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtun.v4i16" - )] - fn _vqmovun_s32(a: int32x4_t) -> uint16x4_t; +pub fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { + let x: int64x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_s32(a, b); } - unsafe { _vqmovun_s32(a) } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + unsafe { + x = simd_add(vpaddlq_s32(b), a); + }; + x } -#[doc = "Signed saturating extract unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqxtun) + assert_instr(uadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -33946,26 +35076,27 @@ pub fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqxtun.v2i32" - )] - fn _vqmovun_s64(a: int64x2_t) -> uint32x2_t; +pub fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { + let x: uint16x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_u8(a, b); } - unsafe { _vqmovun_s64(a) } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + unsafe { + x = simd_add(vpaddl_u8(b), a); + }; + x } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) + assert_instr(uadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -33975,26 +35106,27 @@ pub fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqneg_s8(a: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i8")] - fn _vqneg_s8(a: int8x8_t) -> int8x8_t; +pub fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { + let x: uint16x8_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_u8(a, b); } - unsafe { _vqneg_s8(a) } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + unsafe { + x = simd_add(vpaddlq_u8(b), a); + }; + x } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) + assert_instr(uadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34004,26 +35136,27 @@ pub fn vqneg_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqnegq_s8(a: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v16i8")] - fn _vqnegq_s8(a: int8x16_t) -> int8x16_t; +pub fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { + let x: uint32x2_t; + #[cfg(target_arch = "arm")] + { + x 
= priv_vpadal_u16(a, b); } - unsafe { _vqnegq_s8(a) } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + unsafe { + x = simd_add(vpaddl_u16(b), a); + }; + x } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) + assert_instr(uadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34033,26 +35166,27 @@ pub fn vqnegq_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqneg_s16(a: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i16")] - fn _vqneg_s16(a: int16x4_t) -> int16x4_t; +pub fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + let x: uint32x4_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_u16(a, b); } - unsafe { _vqneg_s16(a) } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + unsafe { + x = simd_add(vpaddlq_u16(b), a); + }; + x } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) + assert_instr(uadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34062,26 +35196,27 @@ pub fn vqneg_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqnegq_s16(a: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i16")] - fn _vqnegq_s16(a: int16x8_t) -> int16x8_t; +pub fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { + let x: uint64x1_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadal_u32(a, b); } - unsafe { _vqnegq_s16(a) } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + unsafe { + x = simd_add(vpaddl_u32(b), a); + }; + x } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u32)"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) + assert_instr(uadalp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34091,81 +35226,96 @@ pub fn vqnegq_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqneg_s32(a: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v2i32")] - fn _vqneg_s32(a: int32x2_t) -> int32x2_t; +pub fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + let x: uint64x2_t; + #[cfg(target_arch = "arm")] + { + x = priv_vpadalq_u32(a, b); } - unsafe { _vqneg_s32(a) } + #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] + unsafe { + x = simd_add(vpaddlq_u32(b), a); + }; + x } -#[doc = "Signed saturating negate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)"] +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqneg) + assert_instr(faddp) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqnegq_s32(a: int32x4_t) -> int32x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vpadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqneg.v4i32" + link_name = "llvm.aarch64.neon.faddp.v4f16" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i32")] - fn _vqnegq_s32(a: int32x4_t) -> int32x4_t; + fn _vpadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; } - unsafe { _vqnegq_s32(a) } + unsafe { _vpadd_f16(a, b) } } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)"] +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(faddp) )] -#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - let b = vdup_lane_s16::<LANE>(b); - vqrdmulh_s16(a, b) +#[cfg(not(target_arch = "arm64ec"))] +pub fn vpadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.faddp.v4f16" + )] + fn _vpadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float16x4_t = _vpadd_f16(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)"] +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(faddp) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -34174,22 +35324,28 @@ pub fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let b = vdup_lane_s32::<LANE>(b); - vqrdmulh_s32(a, b) +pub fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.faddp.v2f32" + )] + fn _vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { _vpadd_f32(a, b) } } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)"] +#[doc = "Floating-point add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 
1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(faddp) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -34198,22 +35354,33 @@ pub fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 3); - let b = vdup_laneq_s16::<LANE>(b); - vqrdmulh_s16(a, b) +pub fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.faddp.v2f32" + )] + fn _vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float32x2_t = _vpadd_f32(a, b); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(addp) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -34222,22 +35389,28 @@ pub fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let b = vdup_laneq_s32::<LANE>(b); - vqrdmulh_s32(a, b) +pub fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v8i8")] + fn _vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + unsafe { _vpadd_s8(a, b) } } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(sqrdmulh, LANE = 1) + assert_instr(addp) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -34246,22 +35419,33 @@ pub fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 2); - let b = vdupq_lane_s16::<LANE>(b); - vqrdmulhq_s16(a, b) +pub fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v8i8")] + fn _vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = _vpadd_s8(a, b); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(addp) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -34294,22 +35484,33 @@ pub fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - let b = vdupq_laneq_s16::(b); - vqrdmulhq_s16(a, b) +pub fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v4i16")] + fn _vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int16x4_t = _vpadd_s16(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Vector rounding saturating doubling multiply high by scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh, LANE = 1) + assert_instr(addp) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -34318,20 +35519,27 @@ pub fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let b = vdupq_laneq_s32::(b); - vqrdmulhq_s32(a, b) +pub fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2i32")] + fn _vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { _vpadd_s32(a, b) } } -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34341,18 +35549,31 @@ pub fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { - vqrdmulh_s16(a, vdup_n_s16(b)) +pub fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.addp.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2i32")] + fn _vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int32x2_t = _vpadd_s32(a, b); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34362,18 +35583,18 @@ pub fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { - vqrdmulhq_s16(a, vdupq_n_s16(b)) +pub fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { transmute(vpadd_s8(transmute(a), transmute(b))) } } -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34383,18 +35604,18 @@ pub fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { - vqrdmulh_s32(a, vdup_n_s32(b)) +pub fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { transmute(vpadd_s16(transmute(a), transmute(b))) } } -#[doc = "Vector saturating rounding doubling multiply high with scalar"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)"] +#[doc = "Add pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch 
= "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(addp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34404,18 +35625,18 @@ pub fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { - vqrdmulhq_s32(a, vdupq_n_s32(b)) +pub fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { transmute(vpadd_s32(transmute(a), transmute(b))) } } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(saddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34425,26 +35646,26 @@ pub fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { +pub fn vpaddl_s8(a: int8x8_t) -> int16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v4i16" + link_name = "llvm.aarch64.neon.saddlp.v4i16.v8i8" )] - fn _vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i16.v8i8")] + fn _vpaddl_s8(a: int8x8_t) -> int16x4_t; } - unsafe { _vqrdmulh_s16(a, b) } + unsafe { _vpaddl_s8(a) } } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(saddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34454,26 +35675,26 @@ pub fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { +pub fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v8i16" + link_name = "llvm.aarch64.neon.saddlp.v8i16.v16i8" )] - fn _vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + #[cfg_attr(target_arch = "arm", link_name 
= "llvm.arm.neon.vpaddls.v8i16.v16i8")] + fn _vpaddlq_s8(a: int8x16_t) -> int16x8_t; } - unsafe { _vqrdmulhq_s16(a, b) } + unsafe { _vpaddlq_s8(a) } } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(saddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34483,26 +35704,26 @@ pub fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { +pub fn vpaddl_s16(a: int16x4_t) -> int32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v2i32" + link_name = "llvm.aarch64.neon.saddlp.v2i32.v4i16" )] - fn _vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i32.v4i16")] + fn _vpaddl_s16(a: int16x4_t) -> int32x2_t; } - unsafe { _vqrdmulh_s32(a, b) } + unsafe { _vpaddl_s16(a) } } -#[doc = "Signed saturating rounding doubling multiply returning high half"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrdmulh) + assert_instr(saddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34512,26 +35733,26 @@ pub fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { +pub fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrdmulh.v4i32" + link_name = "llvm.aarch64.neon.saddlp.v4i32.v8i16" )] - fn _vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v4i32.v8i16")] + fn _vpaddlq_s16(a: int16x8_t) -> int32x4_t; } - unsafe { _vqrdmulhq_s32(a, b) } + unsafe { _vpaddlq_s16(a) } } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)"] +#[doc = "Signed 
Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(saddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34541,26 +35762,26 @@ pub fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { +pub fn vpaddl_s32(a: int32x2_t) -> int64x1_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v8i8" + link_name = "llvm.aarch64.neon.saddlp.v1i64.v2i32" )] - fn _vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v1i64.v2i32")] + fn _vpaddl_s32(a: int32x2_t) -> int64x1_t; } - unsafe { _vqrshl_s8(a, b) } + unsafe { _vpaddl_s32(a) } } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)"] +#[doc = "Signed Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(saddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34570,26 +35791,26 @@ pub fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +pub fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v16i8" + link_name = "llvm.aarch64.neon.saddlp.v2i64.v4i32" )] - fn _vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddls.v2i64.v4i32")] + fn _vpaddlq_s32(a: int32x4_t) -> int64x2_t; } - unsafe { _vqrshlq_s8(a, b) } + unsafe { _vpaddlq_s32(a) } } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] #[cfg_attr( all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(uaddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34599,26 +35820,26 @@ pub fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { +pub fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v4i16" + link_name = "llvm.aarch64.neon.uaddlp.v4i16.v8i8" )] - fn _vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i16.v8i8")] + fn _vpaddl_u8(a: uint8x8_t) -> uint16x4_t; } - unsafe { _vqrshl_s16(a, b) } + unsafe { _vpaddl_u8(a) } } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(uaddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34628,26 +35849,26 @@ pub fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { +pub fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v8i16" + link_name = "llvm.aarch64.neon.uaddlp.v8i16.v16i8" )] - fn _vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v8i16.v16i8")] + fn _vpaddlq_u8(a: uint8x16_t) -> uint16x8_t; } - unsafe { _vqrshlq_s16(a, b) } + unsafe { _vpaddlq_u8(a) } } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(uaddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34657,26 +35878,26 @@ pub fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { +pub fn vpaddl_u16(a: uint16x4_t) 
-> uint32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v2i32" + link_name = "llvm.aarch64.neon.uaddlp.v2i32.v4i16" )] - fn _vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i32.v4i16")] + fn _vpaddl_u16(a: uint16x4_t) -> uint32x2_t; } - unsafe { _vqrshl_s32(a, b) } + unsafe { _vpaddl_u16(a) } } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(uaddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34686,26 +35907,26 @@ pub fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { +pub fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v4i32" + link_name = "llvm.aarch64.neon.uaddlp.v4i32.v8i16" )] - fn _vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v4i32.v8i16")] + fn _vpaddlq_u16(a: uint16x8_t) -> uint32x4_t; } - unsafe { _vqrshlq_s32(a, b) } + unsafe { _vpaddlq_u16(a) } } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s64)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(uaddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34715,26 +35936,26 @@ pub fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { +pub fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v1i64" + link_name = "llvm.aarch64.neon.uaddlp.v1i64.v2i32" )] - fn _vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vpaddlu.v1i64.v2i32")] + fn _vpaddl_u32(a: uint32x2_t) -> uint64x1_t; } - unsafe { _vqrshl_s64(a, b) } + unsafe { _vpaddl_u32(a) } } -#[doc = "Signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)"] +#[doc = "Unsigned Add and Accumulate Long Pairwise."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqrshl) + assert_instr(uaddlp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34744,26 +35965,27 @@ pub fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { +pub fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshl.v2i64" + link_name = "llvm.aarch64.neon.uaddlp.v2i64.v4i32" )] - fn _vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpaddlu.v2i64.v4i32")] + fn _vpaddlq_u32(a: uint32x4_t) -> uint64x2_t; } - unsafe { _vqrshlq_s64(a, b) } + unsafe { _vpaddlq_u32(a) } } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) + assert_instr(fmaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34773,26 +35995,27 @@ pub fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { +pub fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v8i8" + link_name = "llvm.aarch64.neon.fmaxp.v2f32" )] - fn _vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2f32")] + fn _vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; } - unsafe { _vqrshl_u8(a, b) } + unsafe { _vpmax_f32(a, b) } } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)"] +#[doc = "Folding maximum of adjacent pairs"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) + assert_instr(fmaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34802,26 +36025,32 @@ pub fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { +pub fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v16i8" + link_name = "llvm.aarch64.neon.fmaxp.v2f32" )] - fn _vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2f32")] + fn _vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float32x2_t = _vpmax_f32(a, b); + simd_shuffle!(ret_val, ret_val, [1, 0]) } - unsafe { _vqrshlq_u8(a, b) } } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) + assert_instr(smaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34831,26 +36060,27 @@ pub fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { +pub fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v4i16" + link_name = "llvm.aarch64.neon.smaxp.v8i8" )] - fn _vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v8i8")] + fn _vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - unsafe { _vqrshl_u16(a, b) } + unsafe { _vpmax_s8(a, b) } } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s8)"] #[inline] +#[cfg(target_endian = "big")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) + assert_instr(smaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34860,26 +36090,32 @@ pub fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { +pub fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v8i16" + link_name = "llvm.aarch64.neon.smaxp.v8i8" )] - fn _vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v8i8")] + fn _vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = _vpmax_s8(a, b); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vqrshlq_u16(a, b) } } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) + assert_instr(smaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34889,26 +36125,27 @@ pub fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { +pub fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v2i32" + link_name = "llvm.aarch64.neon.smaxp.v4i16" )] - fn _vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v4i16")] + fn _vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - unsafe { _vqrshl_u32(a, b) } + unsafe { _vpmax_s16(a, b) } } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
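// Each body binds one operation to two LLVM intrinsics: the llvm.aarch64.neon.*
// name is linked on aarch64/arm64ec and the llvm.arm.neon.* name on 32-bit Arm,
// selected per target by #[cfg_attr] on the extern "unadjusted" declaration
// (an internal ABI that, roughly speaking, hands SIMD values to LLVM without
// the usual call-convention adjustments).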
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) + assert_instr(smaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34918,26 +36155,32 @@ pub fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { +pub fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v4i32" + link_name = "llvm.aarch64.neon.smaxp.v4i16" )] - fn _vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v4i16")] + fn _vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int16x4_t = _vpmax_s16(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vqrshlq_u32(a, b) } } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u64)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) + assert_instr(smaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34947,26 +36190,27 @@ pub fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { +pub fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v1i64" + link_name = "llvm.aarch64.neon.smaxp.v2i32" )] - fn _vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2i32")] + fn _vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - unsafe { _vqrshl_u64(a, b) } + unsafe { _vpmax_s32(a, b) } } -#[doc = "Unsigned signed saturating rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] 
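// The assert_instr attributes only apply under cfg(test): the stdarch test
// harness disassembles each intrinsic and checks that the named instruction
// (vpmax on Arm, smaxp/umaxp/fmaxp on AArch64) is actually emitted, guarding
// against regressions in instruction selection.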
#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqrshl) + assert_instr(smaxp) )] #[cfg_attr( not(target_arch = "arm"), @@ -34976,343 +36220,193 @@ pub fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { +pub fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshl.v2i64" + link_name = "llvm.aarch64.neon.smaxp.v2i32" )] - fn _vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxs.v2i32")] + fn _vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - unsafe { _vqrshlq_u64(a, b) } -} -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] - fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int32x2_t = _vpmax_s32(a, b); + simd_shuffle!(ret_val, ret_val, [1, 0]) } - unsafe { _vqrshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u8)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] - fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v8i8")] + fn _vpmax_u8(a: uint8x8_t, b: 
uint8x8_t) -> uint8x8_t; } - unsafe { _vqrshrn_n_s32(a, const { int32x4_t([-N; 4]) }) } + unsafe { _vpmax_u8(a, b) } } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u8)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] - fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v8i8")] + fn _vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; + } + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = _vpmax_u8(a, b); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vqrshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrn.v8i8" + link_name = "llvm.aarch64.neon.umaxp.v4i16" )] - fn _vqrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + 
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v4i16")] + fn _vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; } - unsafe { _vqrshrn_n_s16(a, N) } + unsafe { _vpmax_u16(a, b) } } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrn.v4i16" + link_name = "llvm.aarch64.neon.umaxp.v4i16" )] - fn _vqrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v4i16")] + fn _vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + } + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = _vpmax_u16(a, b); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vqrshrn_n_s32(a, N) } } -#[doc = "Signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(umaxp) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrn.v2i32" + link_name = "llvm.aarch64.neon.umaxp.v2i32" )] - fn _vqrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmaxu.v2i32")] + 
fn _vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; } - unsafe { _vqrshrn_n_s64(a, N) } + unsafe { _vpmax_u32(a, b) } } -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] - fn _vqrshrn_n_u16(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t; - } - unsafe { _vqrshrn_n_u16(a, const { uint16x8_t([-N as u16; 8]) }) } -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] - fn _vqrshrn_n_u32(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t; - } - unsafe { _vqrshrn_n_u32(a, const { uint32x4_t([-N as u32; 4]) }) } -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] - fn _vqrshrn_n_u64(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t; - } - unsafe { _vqrshrn_n_u64(a, const { uint64x2_t([-N as u64; 2]) }) } -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshrn.v8i8" - )] - fn _vqrshrn_n_u16(a: uint16x8_t, n: i32) -> uint8x8_t; - } - unsafe { _vqrshrn_n_u16(a, N) } -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = 
"1.59.0")] -pub fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshrn.v4i16" - )] - fn _vqrshrn_n_u32(a: uint32x4_t, n: i32) -> uint16x4_t; - } - unsafe { _vqrshrn_n_u32(a, N) } -} -#[doc = "Unsigned signed saturating rounded shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqrshrn.v2i32" - )] - fn _vqrshrn_n_u64(a: uint64x2_t, n: i32) -> uint32x2_t; - } - unsafe { _vqrshrn_n_u64(a, N) } -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] - fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t; - } - unsafe { _vqrshrun_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] - fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t; - } - unsafe { _vqrshrun_n_s32(a, const { int32x4_t([-N; 4]) }) } -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] - fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t; - } - unsafe { _vqrshrun_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqrshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrun.v8i8" - )] - fn _vqrshrun_n_s16(a: int16x8_t, n: i32) -> uint8x8_t; - } - unsafe { _vqrshrun_n_s16(a, N) } -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqrshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrun.v4i16" - )] - fn _vqrshrun_n_s32(a: int32x4_t, n: i32) -> uint16x4_t; - } - unsafe { _vqrshrun_n_s32(a, N) } -} -#[doc = "Signed saturating rounded shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqrshrun.v2i32" - )] - fn _vqrshrun_n_s64(a: int64x2_t, n: i32) -> uint32x2_t; - } - unsafe { _vqrshrun_n_s64(a, N) } -} -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)"] +#[doc = "Folding maximum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) + assert_instr(umaxp) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -35321,21 +36415,33 @@ pub fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(N, 3); - vqshl_s8(a, vdup_n_s8(N as _)) +pub fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.umaxp.v2i32" + )] + #[cfg_attr(target_arch = 
"arm", link_name = "llvm.arm.neon.vpmaxu.v2i32")] + fn _vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; + } + unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = _vpmax_u32(a, b); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) + assert_instr(fminp) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -35344,21 +36450,28 @@ pub fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(N, 3); - vqshlq_s8(a, vdupq_n_s8(N as _)) +pub fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v2f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2f32")] + fn _vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { _vpmin_f32(a, b) } } -#[doc = "Signed saturating shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)"] +#[doc = "Folding minimum of adjacent pairs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqshl, N = 2) + assert_instr(fminp) )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -35367,21 +36480,33 @@ pub fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - vqshl_s16(a, vdup_n_s16(N as _)) +pub fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.fminp.v2f32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2f32")] + fn _vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float32x2_t = _vpmin_f32(a, b); + simd_shuffle!(ret_val, ret_val, 
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)"]
+#[doc = "Folding minimum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl, N = 2)
+    assert_instr(sminp)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35390,21 +36515,28 @@ pub fn vqshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
-    static_assert_uimm_bits!(N, 4);
-    vqshlq_s16(a, vdupq_n_s16(N as _))
+pub fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sminp.v8i8"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v8i8")]
+        fn _vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t;
+    }
+    unsafe { _vpmin_s8(a, b) }
 }
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)"]
+#[doc = "Folding minimum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl, N = 2)
+    assert_instr(sminp)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35413,21 +36545,33 @@ pub fn vqshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
-    static_assert_uimm_bits!(N, 5);
-    vqshl_s32(a, vdup_n_s32(N as _))
+pub fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sminp.v8i8"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v8i8")]
+        fn _vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t;
+    }
+    unsafe {
+        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: int8x8_t = _vpmin_s8(a, b);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)"]
+#[doc = "Folding minimum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl, N = 2)
+    assert_instr(sminp)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35436,21 +36580,28 @@ pub fn vqshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
-    static_assert_uimm_bits!(N, 5);
-    vqshlq_s32(a, vdupq_n_s32(N as _))
+pub fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sminp.v4i16"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v4i16")]
+        fn _vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t;
+    }
+    unsafe { _vpmin_s16(a, b) }
 }
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s64)"]
+#[doc = "Folding minimum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl, N = 2)
+    assert_instr(sminp)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35459,21 +36610,33 @@ pub fn vqshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
-    static_assert_uimm_bits!(N, 6);
-    vqshl_s64(a, vdup_n_s64(N as _))
+pub fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sminp.v4i16"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v4i16")]
+        fn _vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t;
+    }
+    unsafe {
+        let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: int16x4_t = _vpmin_s16(a, b);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)"]
+#[doc = "Folding minimum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl, N = 2)
+    assert_instr(sminp)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35482,21 +36645,28 @@ pub fn vqshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
-    static_assert_uimm_bits!(N, 6);
-    vqshlq_s64(a, vdupq_n_s64(N as _))
+pub fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sminp.v2i32"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2i32")]
+        fn _vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t;
+    }
+    unsafe { _vpmin_s32(a, b) }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)"]
+#[doc = "Folding minimum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl, N = 2)
+    assert_instr(sminp)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35505,21 +36675,33 @@ pub fn vqshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
-    static_assert_uimm_bits!(N, 3);
-    vqshl_u8(a, vdup_n_s8(N as _))
+pub fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sminp.v2i32"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpmins.v2i32")]
        fn _vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t;
+    }
+    unsafe {
+        let a: int32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: int32x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: int32x2_t = _vpmin_s32(a, b);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)"]
+#[doc = "Folding minimum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl, N = 2)
+    assert_instr(uminp)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35528,21 +36710,28 @@ pub fn vqshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
-    static_assert_uimm_bits!(N, 3);
-    vqshlq_u8(a, vdupq_n_s8(N as _))
+pub fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.uminp.v8i8"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v8i8")]
+        fn _vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
+    }
+    unsafe { _vpmin_u8(a, b) }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)"]
+#[doc = "Folding minimum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl, N = 2)
+    assert_instr(uminp)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35551,21 +36740,33 @@ pub fn vqshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
-    static_assert_uimm_bits!(N, 4);
-    vqshl_u16(a, vdup_n_s16(N as _))
+pub fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.uminp.v8i8"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v8i8")]
+        fn _vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
+    }
+    unsafe {
+        let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: uint8x8_t = _vpmin_u8(a, b);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)"]
+#[doc = "Folding minimum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl, N = 2)
+    assert_instr(uminp)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35574,21 +36775,28 @@ pub fn vqshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
-    static_assert_uimm_bits!(N, 4);
-    vqshlq_u16(a, vdupq_n_s16(N as _))
+pub fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.uminp.v4i16"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v4i16")]
+        fn _vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
+    }
+    unsafe { _vpmin_u16(a, b) }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)"]
+#[doc = "Folding minimum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl, N = 2)
+    assert_instr(uminp)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35597,21 +36805,33 @@ pub fn vqshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
-    static_assert_uimm_bits!(N, 5);
-    vqshl_u32(a, vdup_n_s32(N as _))
+pub fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.uminp.v4i16"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v4i16")]
+        fn _vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
+    }
+    unsafe {
+        let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let ret_val: uint16x4_t = _vpmin_u16(a, b);
+        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+    }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)"]
+#[doc = "Folding minimum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl, N = 2)
+    assert_instr(uminp)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35620,21 +36840,28 @@ pub fn vqshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
-    static_assert_uimm_bits!(N, 5);
-    vqshlq_u32(a, vdupq_n_s32(N as _))
+pub fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.uminp.v2i32"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v2i32")]
+        fn _vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
+    }
+    unsafe { _vpmin_u32(a, b) }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u64)"]
+#[doc = "Folding minimum of adjacent pairs"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl, N = 2)
+    assert_instr(uminp)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35643,21 +36870,32 @@ pub fn vqshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
-    static_assert_uimm_bits!(N, 6);
-    vqshl_u64(a, vdup_n_s64(N as _))
+pub fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.uminp.v2i32"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpminu.v2i32")]
+        fn _vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
+    }
+    unsafe {
+        let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]);
+        let ret_val: uint32x2_t = _vpmin_u32(a, b);
+        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)"]
+#[doc = "Signed saturating Absolute value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl, N = 2)
+    assert_instr(sqabs)
 )]
-#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -35666,19 +36904,26 @@ pub fn vqshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
-    static_assert_uimm_bits!(N, 6);
-    vqshlq_u64(a, vdupq_n_s64(N as _))
+pub fn vqabs_s8(a: int8x8_t) -> int8x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqabs.v8i8"
+        )]
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i8")]
+        fn _vqabs_s8(a: int8x8_t) -> int8x8_t;
+    }
+    unsafe { _vqabs_s8(a) }
 }
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)"]
+#[doc = "Signed saturating Absolute value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl)
+    assert_instr(sqabs)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -35688,26 +36933,26 @@ pub fn vqshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+pub fn vqabsq_s8(a: int8x16_t) -> int8x16_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")]
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sqshl.v8i8"
+            link_name = "llvm.aarch64.neon.sqabs.v16i8"
         )]
-        fn _vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v16i8")]
+        fn _vqabsq_s8(a: int8x16_t) -> int8x16_t;
     }
-    unsafe { _vqshl_s8(a, b) }
+    unsafe { _vqabsq_s8(a) }
 }
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)"]
+#[doc = "Signed saturating Absolute value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl)
+    assert_instr(sqabs)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -35717,26 +36962,26 @@ pub fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+pub fn vqabs_s16(a: int16x4_t) -> int16x4_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")]
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sqshl.v16i8"
+            link_name = "llvm.aarch64.neon.sqabs.v4i16"
         )]
-        fn _vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i16")]
+        fn _vqabs_s16(a: int16x4_t) -> int16x4_t;
     }
-    unsafe { _vqshlq_s8(a, b) }
+    unsafe { _vqabs_s16(a) }
 }
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)"]
+#[doc = "Signed saturating Absolute value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl)
+    assert_instr(sqabs)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -35746,26 +36991,26 @@ pub fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+pub fn vqabsq_s16(a: int16x8_t) -> int16x8_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")]
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sqshl.v4i16"
+            link_name = "llvm.aarch64.neon.sqabs.v8i16"
        )]
-        fn _vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i16")]
+        fn _vqabsq_s16(a: int16x8_t) -> int16x8_t;
     }
-    unsafe { _vqshl_s16(a, b) }
+    unsafe { _vqabsq_s16(a) }
 }
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)"]
+#[doc = "Signed saturating Absolute value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl)
+    assert_instr(sqabs)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -35775,26 +37020,26 @@ pub fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+pub fn vqabs_s32(a: int32x2_t) -> int32x2_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")]
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sqshl.v8i16"
+            link_name = "llvm.aarch64.neon.sqabs.v2i32"
         )]
-        fn _vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v2i32")]
+        fn _vqabs_s32(a: int32x2_t) -> int32x2_t;
     }
-    unsafe { _vqshlq_s16(a, b) }
+    unsafe { _vqabs_s32(a) }
 }
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)"]
+#[doc = "Signed saturating Absolute value"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl)
+    assert_instr(sqabs)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -35804,26 +37049,26 @@ pub fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
+pub fn vqabsq_s32(a: int32x4_t) -> int32x4_t {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")]
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sqshl.v2i32"
+            link_name = "llvm.aarch64.neon.sqabs.v4i32"
         )]
-        fn _vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i32")]
+        fn _vqabsq_s32(a: int32x4_t) -> int32x4_t;
     }
-    unsafe { _vqshl_s32(a, b) }
+    unsafe { _vqabsq_s32(a) }
 }
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)"]
+#[doc = "Saturating add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl)
+    assert_instr(sqadd)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -35833,26 +37078,18 @@ pub fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i32")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sqshl.v4i32"
-        )]
-        fn _vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
-    }
-    unsafe { _vqshlq_s32(a, b) }
+pub fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+    unsafe { simd_saturating_add(a, b) }
 }
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s64)"]
+#[doc = "Saturating add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl)
+    assert_instr(sqadd)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -35862,26 +37099,18 @@ pub fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v1i64")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sqshl.v1i64"
-        )]
-        fn _vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t;
-    }
-    unsafe { _vqshl_s64(a, b) }
+pub fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    unsafe { simd_saturating_add(a, b) }
 }
-#[doc = "Signed saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)"]
+#[doc = "Saturating add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sqshl)
+    assert_instr(sqadd)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -35891,26 +37120,18 @@ pub fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sqshl.v2i64"
-        )]
-        fn _vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t;
-    }
-    unsafe { _vqshlq_s64(a, b) }
+pub fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+    unsafe { simd_saturating_add(a, b) }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)"]
+#[doc = "Saturating add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl)
+    assert_instr(sqadd)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -35920,26 +37141,18 @@ pub fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.uqshl.v8i8"
-        )]
-        fn _vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
-    }
-    unsafe { _vqshl_u8(a, b) }
+pub fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+    unsafe { simd_saturating_add(a, b) }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)"]
+#[doc = "Saturating add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl)
+    assert_instr(sqadd)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -35949,26 +37162,18 @@ pub fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.uqshl.v16i8"
-        )]
-        fn _vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
-    }
-    unsafe { _vqshlq_u8(a, b) }
+pub fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
+    unsafe { simd_saturating_add(a, b) }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)"]
+#[doc = "Saturating add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl)
+    assert_instr(sqadd)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -35978,26 +37183,18 @@ pub fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.uqshl.v4i16"
-        )]
-        fn _vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
-    }
-    unsafe { _vqshl_u16(a, b) }
+pub fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
+    unsafe { simd_saturating_add(a, b) }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)"]
+#[doc = "Saturating add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s64)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl)
+    assert_instr(sqadd)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -36007,26 +37204,18 @@ pub fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.uqshl.v8i16"
-        )]
-        fn _vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
-    }
-    unsafe { _vqshlq_u16(a, b) }
+pub fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
+    unsafe { simd_saturating_add(a, b) }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)"]
+#[doc = "Saturating add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl)
+    assert_instr(sqadd)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -36036,26 +37225,18 @@ pub fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.uqshl.v2i32"
-        )]
-        fn _vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
-    }
-    unsafe { _vqshl_u32(a, b) }
+pub fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
+    unsafe { simd_saturating_add(a, b) }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)"]
+#[doc = "Saturating add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl)
+    assert_instr(uqadd)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -36065,26 +37246,18 @@ pub fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.uqshl.v4i32"
-        )]
-        fn _vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
-    }
-    unsafe { _vqshlq_u32(a, b) }
+pub fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
+    unsafe { simd_saturating_add(a, b) }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u64)"]
+#[doc = "Saturating add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl)
+    assert_instr(uqadd)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -36094,26 +37267,18 @@ pub fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v1i64")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.uqshl.v1i64"
-        )]
-        fn _vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
-    }
-    unsafe { _vqshl_u64(a, b) }
+pub fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
+    unsafe { simd_saturating_add(a, b) }
 }
-#[doc = "Unsigned saturating shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)"]
+#[doc = "Saturating add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(uqshl)
+    assert_instr(uqadd)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -36123,622 +37288,401 @@ pub fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.uqshl.v2i64"
-        )]
-        fn _vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
-    }
-    unsafe { _vqshlq_u64(a, b) }
+pub fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
+    unsafe { simd_saturating_add(a, b) }
 }
-#[doc = "Signed saturating shift left unsigned"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"]
+#[doc = "Saturating add"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"]
 #[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
-#[rustc_legacy_const_generics(1)]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")]
-        fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> uint8x8_t;
-    }
-    unsafe { _vqshlu_n_s8(a, const { int8x8_t([N as i8; 8]) }) }
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(uqadd)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
+    unsafe { simd_saturating_add(a, b) }
 }
+#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_saturating_add(a, b) } } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] - fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> uint16x4_t; - } - unsafe { _vqshlu_n_s16(a, const { int16x4_t([N as i16; 4]) }) } +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_saturating_add(a, b) } } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u64)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] - fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> uint16x8_t; - } - unsafe { _vqshluq_n_s16(a, const { int16x8_t([N as i16; 8]) }) } -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] - fn _vqshlu_n_s32(a: 
int32x2_t, n: int32x2_t) -> uint32x2_t; - } - unsafe { _vqshlu_n_s32(a, const { int32x2_t([N; 2]) }) } -} -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] - fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> uint32x4_t; - } - unsafe { _vqshluq_n_s32(a, const { int32x4_t([N; 4]) }) } +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { simd_saturating_add(a, b) } } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")] - fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> uint64x1_t; - } - unsafe { _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }) } +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqadd) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { simd_saturating_add(a, b) } } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] +#[doc = "Vector widening saturating doubling multiply accumulate with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { - 
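static_assert_uimm_bits!(N, BITS) in the removed lines rejects any immediate that does not fit in BITS unsigned bits, at compile time rather than at runtime. A stand-alone approximation of the trick (names hypothetical, not from the patch):

    struct AssertUimmBits<const N: i32, const BITS: u32>;

    impl<const N: i32, const BITS: u32> AssertUimmBits<N, BITS> {
        // Only evaluated on instantiation, so a bad N fails the build.
        const OK: () = assert!(N >= 0 && (N as u32) < (1u32 << BITS));
    }

    fn shift_by<const N: i32>() {
        let _: () = AssertUimmBits::<N, 6>::OK; // e.g. 0..=63 for 64-bit shifts
    }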
-#[doc = "Signed saturating shift left unsigned"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"]
+#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"]
 #[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
-#[rustc_legacy_const_generics(1)]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t {
-    static_assert_uimm_bits!(N, 6);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")]
-        fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> uint64x2_t;
-    }
-    unsafe { _vqshluq_n_s64(a, const { int64x2_t([N as i64; 2]) }) }
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))]
+#[cfg_attr(
+    all(
+        test,
+        any(target_arch = "aarch64", target_arch = "arm64ec"),
+        target_endian = "little"
+    ),
+    assert_instr(sqdmlal, N = 2)
+)]
+#[rustc_legacy_const_generics(3)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub fn vqdmlal_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
+    static_assert_uimm_bits!(N, 2);
+    vqaddq_s32(a, vqdmull_lane_s16::<N>(b, c))
 }
-#[doc = "Signed saturating shift left unsigned"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"]
+#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
-#[rustc_legacy_const_generics(1)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sqshlu.v8i8"
-        )]
-        fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> uint8x8_t;
-    }
-    unsafe { _vqshlu_n_s8(a, const { int8x8_t([N as i8; 8]) }) }
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))]
+#[cfg_attr(
+    all(
+        test,
+        any(target_arch = "aarch64", target_arch = "arm64ec"),
+        target_endian = "little"
+    ),
+    assert_instr(sqdmlal, N = 1)
+)]
+#[rustc_legacy_const_generics(3)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub fn vqdmlal_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
+    static_assert_uimm_bits!(N, 1);
+    vqaddq_s64(a, vqdmull_lane_s32::<N>(b, c))
 }
-#[doc = "Signed saturating shift left unsigned"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"]
+#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
-#[rustc_legacy_const_generics(1)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sqshlu.v16i8"
-        )]
-        fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> uint8x16_t;
-    }
-    unsafe { _vqshluq_n_s8(a, const { int8x16_t([N as i8; 16]) }) }
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(sqdmlal)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
+    vqaddq_s32(a, vqdmull_n_s16(b, c))
 }
-#[doc = "Signed saturating shift left unsigned"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"]
+#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
-#[rustc_legacy_const_generics(1)]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sqshlu.v4i16"
-        )]
-        fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> uint16x4_t;
-    }
-    unsafe { _vqshlu_n_s16(a, const { int16x4_t([N as i16; 4]) }) }
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(sqdmlal)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
+    vqaddq_s64(a, vqdmull_n_s32(b, c))
 }
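vqdmlal_* is defined by composition: a widening doubling multiply (vqdmull) followed by a saturating accumulate (vqaddq). One lane, modeled in scalar code (a sketch; the hardware instruction is sqdmlal):

    fn qdmlal_lane_model(acc: i32, b: i16, c: i16) -> i32 {
        // vqdmull step: widen to i32 and double; only i16::MIN * i16::MIN
        // can overflow i32, and the saturating multiply clamps that case.
        let doubled = (b as i32 * c as i32).saturating_mul(2);
        // vqaddq step: saturating accumulate into the wide accumulator.
        acc.saturating_add(doubled)
    }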
int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + vqaddq_s32(a, vqdmull_s16(b, c)) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] +#[doc = "Signed saturating doubling multiply-add long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v2i32" - )] - fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> uint32x2_t; - } - unsafe { _vqshlu_n_s32(a, const { int32x2_t([N; 2]) }) } +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlal) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + vqaddq_s64(a, vqdmull_s32(b, c)) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(N, 5); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v4i32" - )] - fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> uint32x4_t; - } - unsafe { _vqshluq_n_s32(a, const { int32x4_t([N; 4]) }) } +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))] +#[cfg_attr( + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(sqdmlsl, N = 2) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqdmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + vqsubq_s32(a, vqdmull_lane_s16::(b, c)) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v1i64" - )] - fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> uint64x1_t; - } - unsafe { _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }) } +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))] +#[cfg_attr( + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(sqdmlsl, N = 1) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqdmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + vqsubq_s64(a, vqdmull_lane_s32::(b, c)) } -#[doc = "Signed saturating shift left unsigned"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshlu, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(N, 6); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshlu.v2i64" - )] - fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> uint64x2_t; - } - unsafe { _vqshluq_n_s64(a, const { int64x2_t([N as i64; 2]) }) } +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + vqsubq_s32(a, vqdmull_n_s16(b, c)) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] +#[doc = "Vector widening saturating doubling multiply subtract with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { - 
static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] - fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - unsafe { _vqshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + vqsubq_s64(a, vqdmull_n_s32(b, c)) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] - fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - unsafe { _vqshrn_n_s32(a, const { int32x4_t([-N; 4]) }) } +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + vqsubq_s32(a, vqdmull_s16(b, c)) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] +#[doc = "Signed saturating doubling multiply-subtract long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] - fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - unsafe { _vqshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + 
assert_instr(sqdmlsl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + vqsubq_s64(a, vqdmull_s32(b, c)) } -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v8i8" - )] - fn _vqshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - unsafe { _vqshrn_n_s16(a, N) } -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v4i16" - )] - fn _vqshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - unsafe { _vqshrn_n_s32(a, N) } -} -#[doc = "Signed saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrn.v2i32" - )] - fn _vqshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - unsafe { _vqshrn_n_s64(a, N) } -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] - fn _vqshrn_n_u16(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t; - } - unsafe { _vqshrn_n_u16(a, const { uint16x8_t([-N as u16; 8]) }) } -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] 
-#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] - fn _vqshrn_n_u32(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t; - } - unsafe { _vqshrn_n_u32(a, const { uint32x4_t([-N as u32; 4]) }) } -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] - fn _vqshrn_n_u64(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t; - } - unsafe { _vqshrn_n_u64(a, const { uint64x2_t([-N as u64; 2]) }) } -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v8i8" - )] - fn _vqshrn_n_u16(a: uint16x8_t, n: i32) -> uint8x8_t; - } - unsafe { _vqshrn_n_u16(a, N) } -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v4i16" - )] - fn _vqshrn_n_u32(a: uint32x4_t, n: i32) -> uint16x4_t; - } - unsafe { _vqshrn_n_u32(a, N) } -} -#[doc = "Unsigned saturating shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(uqshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.uqshrn.v2i32" - )] - fn _vqshrn_n_u64(a: uint64x2_t, n: i32) -> uint32x2_t; - } - unsafe { _vqshrn_n_u64(a, N) } -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] -#[inline] 
-#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] - fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t; - } - unsafe { _vqshrun_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] - fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t; - } - unsafe { _vqshrun_n_s32(a, const { int32x4_t([-N; 4]) }) } -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] - fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t; - } - unsafe { _vqshrun_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v8i8" - )] - fn _vqshrun_n_s16(a: int16x8_t, n: i32) -> uint8x8_t; - } - unsafe { _vqshrun_n_s16(a, N) } -} -#[doc = "Signed saturating shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v4i16" - )] - fn _vqshrun_n_s32(a: int32x4_t, n: i32) -> uint16x4_t; - } - unsafe { _vqshrun_n_s32(a, N) } -} -#[doc = "Signed saturating 
shift right unsigned narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(sqshrun, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sqshrun.v2i32" - )] - fn _vqshrun_n_s64(a: int64x2_t, n: i32) -> uint32x2_t; - } - unsafe { _vqshrun_n_s64(a, N) } -} -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(sqdmulh, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36747,19 +37691,21 @@ pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + vqdmulh_s16(a, vdup_n_s16(vgetq_lane_s16::(b))) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(sqdmulh, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36768,19 +37714,21 @@ pub fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + vqdmulhq_s16(a, vdupq_n_s16(vgetq_lane_s16::(b))) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(sqdmulh, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36789,19 +37737,21 @@ pub fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + vqdmulh_s32(a, vdup_n_s32(vgetq_lane_s32::(b))) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] +#[doc = "Vector saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(sqdmulh, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36810,18 +37760,19 @@ pub fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + vqdmulhq_s32(a, vdupq_n_s32(vgetq_lane_s32::(b))) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -36831,18 +37782,19 @@ pub fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { + let b: int16x4_t = vdup_n_s16(b); + vqdmulh_s16(a, b) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -36852,18 +37804,19 @@ pub fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { + let b: int16x8_t = vdupq_n_s16(b); + vqdmulhq_s16(a, b) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)"] +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -36873,18 +37826,19 @@ pub fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { + let b: int32x2_t = vdup_n_s32(b); + vqdmulh_s32(a, b) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] +#[doc = "Vector saturating doubling multiply high with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sqsub) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -36894,18 +37848,19 @@ pub fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { + let b: int32x4_t = vdupq_n_s32(b); + vqdmulhq_s32(a, b) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] +#[doc = "Signed saturating doubling 
multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -36915,18 +37870,26 @@ pub fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v4i16" + )] + fn _vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + unsafe { _vqdmulh_s16(a, b) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -36936,18 +37899,26 @@ pub fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v8i16" + )] + fn _vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + } + unsafe { _vqdmulhq_s16(a, b) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -36957,18 +37928,26 @@ pub fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") )] -pub fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v2i32" + )] + fn _vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { _vqdmulh_s32(a, b) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] +#[doc = "Signed saturating doubling multiply returning high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(sqdmulh) )] #[cfg_attr( not(target_arch = "arm"), @@ -36978,19 +37957,28 @@ pub fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmulh.v4i32" + )] + fn _vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + } + unsafe { _vqdmulhq_s32(a, b) } } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(sqdmull, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -36999,19 +37987,22 @@ pub fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 2); + let b = vdup_lane_s16::(b); + vqdmull_s16(a, b) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] +#[doc = "Vector saturating doubling long multiply by scalar"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(sqdmull, N = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -37020,18 +38011,20 @@ pub fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 1); + let b = vdup_lane_s32::(b); + vqdmull_s32(a, b) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)"] +#[doc = "Vector saturating doubling long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(sqdmull) )] #[cfg_attr( not(target_arch = "arm"), @@ -37041,18 +38034,18 @@ pub fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { + vqdmull_s16(a, vdup_n_s16(b)) } -#[doc = "Saturating subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] +#[doc = "Vector saturating doubling long multiply with scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uqsub) + assert_instr(sqdmull) )] #[cfg_attr( not(target_arch = "arm"), @@ -37062,22 +38055,18 @@ pub fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - unsafe { simd_saturating_sub(a, b) } +pub fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { + vqdmull_s32(a, vdup_n_s32(b)) } -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(raddhn2) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull) )] #[cfg_attr( not(target_arch = "arm"), @@ -37087,23 +38076,26 @@ pub fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { - let x = vraddhn_s16(b, c); - vcombine_s8(a, x) +pub fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmull.v4i32" + )] + fn _vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; + } + unsafe { _vqdmull_s16(a, b) } } -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s32)"] +#[doc = "Signed saturating doubling multiply long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(raddhn2) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqdmull) )] #[cfg_attr( not(target_arch = "arm"), @@ -37113,23 +38105,26 @@ pub fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vraddhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { - let x = vraddhn_s32(b, c); - vcombine_s16(a, x) +pub fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqdmull.v2i64" + )] + fn _vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; + } + unsafe { _vqdmull_s32(a, b) } } -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s64)"] +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr( - all( - test, -
any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(raddhn2) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -37139,23 +38134,26 @@ pub fn vraddhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { - let x = vraddhn_s64(b, c); - vcombine_s32(a, x) +pub fn vqmovn_s16(a: int16x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtn.v8i8" + )] + fn _vqmovn_s16(a: int16x8_t) -> int8x8_t; + } + unsafe { _vqmovn_s16(a) } } -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u16)"] +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(raddhn2) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -37165,25 +38163,26 @@ pub fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { - unsafe { - let x: uint8x8_t = transmute(vraddhn_s16(transmute(b), transmute(c))); - vcombine_u8(a, x) +pub fn vqmovn_s32(a: int32x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtn.v4i16" + )] + fn _vqmovn_s32(a: int32x4_t) -> int16x4_t; } + unsafe { _vqmovn_s32(a) } } -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u32)"] +#[doc = "Signed saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(raddhn2) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sqxtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -37193,25 +38192,26 @@ pub fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_ target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { - unsafe { - let x: uint16x4_t = transmute(vraddhn_s32(transmute(b), transmute(c))); - vcombine_u16(a, x) +pub fn vqmovn_s64(a: int64x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtn.v2i32" + )] + fn _vqmovn_s64(a: int64x2_t) -> int32x2_t; } + unsafe { _vqmovn_s64(a) } } -#[doc = "Rounding Add returning High Narrow (high half)."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u64)"] +#[doc = "Unsigned saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(raddhn2) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uqxtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -37221,21 +38221,26 @@ pub fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { - unsafe { - let x: uint32x2_t = transmute(vraddhn_s64(transmute(b), transmute(c))); - vcombine_u32(a, x) +pub fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqxtn.v8i8" + )] + fn _vqmovn_u16(a: uint16x8_t) -> uint8x8_t; } + unsafe { _vqmovn_u16(a) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s16)"] +#[doc = "Unsigned saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(uqxtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -37245,26 +38250,26 @@ pub fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4 target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { +pub fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v8i8" + link_name = "llvm.aarch64.neon.uqxtn.v4i16" )] - #[cfg_attr(target_arch = 
"arm", link_name = "llvm.arm.neon.vraddhn.v8i8")] - fn _vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; + fn _vqmovn_u32(a: uint32x4_t) -> uint16x4_t; } - unsafe { _vraddhn_s16(a, b) } + unsafe { _vqmovn_u32(a) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s32)"] +#[doc = "Unsigned saturating extract narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(uqxtn) )] #[cfg_attr( not(target_arch = "arm"), @@ -37274,26 +38279,26 @@ pub fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { +pub fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v4i16" + link_name = "llvm.aarch64.neon.uqxtn.v2i32" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i16")] - fn _vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; + fn _vqmovn_u64(a: uint64x2_t) -> uint32x2_t; } - unsafe { _vraddhn_s32(a, b) } + unsafe { _vqmovn_u64(a) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s64)"] +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(sqxtun) )] #[cfg_attr( not(target_arch = "arm"), @@ -37303,26 +38308,26 @@ pub fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { +pub fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.raddhn.v2i32" + link_name = "llvm.aarch64.neon.sqxtun.v8i8" )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i32")] - fn _vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; + fn _vqmovun_s16(a: int16x8_t) -> uint8x8_t; } - unsafe { _vraddhn_s64(a, b) } + unsafe { _vqmovun_s16(a) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(sqxtun) )] #[cfg_attr( not(target_arch = "arm"), @@ -37332,18 +38337,26 @@ pub fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - unsafe { transmute(vraddhn_s16(transmute(a), transmute(b))) } +pub fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtun.v4i16" + )] + fn _vqmovun_s32(a: int32x4_t) -> uint16x4_t; + } + unsafe { _vqmovun_s32(a) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] +#[doc = "Signed saturating extract unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(sqxtun) )] #[cfg_attr( not(target_arch = "arm"), @@ -37353,18 +38366,26 @@ pub fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - unsafe { transmute(vraddhn_s32(transmute(a), transmute(b))) } +pub fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqxtun.v2i32" + )] + fn _vqmovun_s64(a: int64x2_t) -> uint32x2_t; + } + unsafe { _vqmovun_s64(a) } } -#[doc = "Rounding Add returning High Narrow."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(raddhn) + assert_instr(sqneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -37374,78 +38395,84 @@ pub fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - unsafe { transmute(vraddhn_s64(transmute(a), transmute(b))) } +pub fn vqneg_s8(a: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqneg.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i8")] + fn _vqneg_s8(a: int8x8_t) -> int8x8_t; + } + unsafe { _vqneg_s8(a) } } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f16)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) + assert_instr(sqneg) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrecpe_f16(a: float16x4_t) -> float16x4_t { +pub fn vqnegq_s8(a: int8x16_t) -> int8x16_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v4f16" + link_name = "llvm.aarch64.neon.sqneg.v16i8" )] - fn _vrecpe_f16(a: float16x4_t) -> float16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v16i8")] + fn _vqnegq_s8(a: int8x16_t) -> int8x16_t; } - unsafe { _vrecpe_f16(a) } + unsafe { _vqnegq_s8(a) } } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f16)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) + assert_instr(sqneg) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrecpeq_f16(a: float16x8_t) -> float16x8_t { +pub fn vqneg_s16(a: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v8f16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v8f16" + link_name = 
"llvm.aarch64.neon.sqneg.v4i16" )] - fn _vrecpeq_f16(a: float16x8_t) -> float16x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i16")] + fn _vqneg_s16(a: int16x4_t) -> int16x4_t; } - unsafe { _vrecpeq_f16(a) } + unsafe { _vqneg_s16(a) } } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) + assert_instr(sqneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -37455,26 +38482,26 @@ pub fn vrecpeq_f16(a: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrecpe_f32(a: float32x2_t) -> float32x2_t { +pub fn vqnegq_s16(a: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v2f32" + link_name = "llvm.aarch64.neon.sqneg.v8i16" )] - fn _vrecpe_f32(a: float32x2_t) -> float32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i16")] + fn _vqnegq_s16(a: int16x8_t) -> int16x8_t; } - unsafe { _vrecpe_f32(a) } + unsafe { _vqnegq_s16(a) } } -#[doc = "Reciprocal estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecpe) + assert_instr(sqneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -37484,26 +38511,26 @@ pub fn vrecpe_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { +pub fn vqneg_s32(a: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecpe.v4f32" + link_name = "llvm.aarch64.neon.sqneg.v2i32" )] - fn _vrecpeq_f32(a: float32x4_t) -> float32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v2i32")] + fn _vqneg_s32(a: int32x2_t) -> int32x2_t; } - unsafe { _vrecpeq_f32(a) } + unsafe { _vqneg_s32(a) } } -#[doc = "Unsigned reciprocal estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] +#[doc = "Signed saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urecpe) + assert_instr(sqneg) )] #[cfg_attr( not(target_arch = "arm"), @@ -37513,27 +38540,28 @@ pub fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { +pub fn vqnegq_s32(a: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urecpe.v2i32" + link_name = "llvm.aarch64.neon.sqneg.v4i32" )] - fn _vrecpe_u32(a: uint32x2_t) -> uint32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i32")] + fn _vqnegq_s32(a: int32x4_t) -> int32x4_t; } - unsafe { _vrecpe_u32(a) } + unsafe { _vqnegq_s32(a) } } -#[doc = "Unsigned reciprocal estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urecpe) + assert_instr(sqrdmulh, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -37542,87 +38570,70 @@ pub fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urecpe.v4i32" - )] - fn _vrecpeq_u32(a: uint32x4_t) -> uint32x4_t; - } - unsafe { _vrecpeq_u32(a) } +pub fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + let b = vdup_lane_s16::(b); + vqrdmulh_s16(a, b) } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f16)"] +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) + assert_instr(sqrdmulh, LANE = 1) )] 
-#[target_feature(enable = "neon,fp16")] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrecps_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v4f16" - )] - fn _vrecps_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; - } - unsafe { _vrecps_f16(a, b) } +pub fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let b = vdup_lane_s32::(b); + vqrdmulh_s32(a, b) } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f16)"] +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) + assert_instr(sqrdmulh, LANE = 1) )] -#[target_feature(enable = "neon,fp16")] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrecpsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frecps.v8f16" - )] - fn _vrecpsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; - } - unsafe { _vrecpsq_f16(a, b) } +pub fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 3); + let b = vdup_laneq_s16::(b); + vqrdmulh_s16(a, b) } -#[doc = "Floating-point reciprocal step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] +#[doc = "Vector rounding saturating doubling multiply high by scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frecps) + assert_instr(sqrdmulh, LANE = 1) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ 
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frecps.v2f32"
-        )]
-        fn _vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
-    }
-    unsafe { _vrecps_f32(a, b) }
+pub fn vqrdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
+    static_assert_uimm_bits!(LANE, 2);
+    let b = vdup_laneq_s32::<LANE>(b);
+    vqrdmulh_s32(a, b)
 }
-#[doc = "Floating-point reciprocal step"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"]
+#[doc = "Vector rounding saturating doubling multiply high by scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(frecps)
+    assert_instr(sqrdmulh, LANE = 1)
 )]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -37660,1901 +38666,2582 @@ pub fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.frecps.v4f32"
-        )]
-        fn _vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
-    }
-    unsafe { _vrecpsq_f32(a, b) }
+pub fn vqrdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
+    static_assert_uimm_bits!(LANE, 2);
+    let b = vdupq_lane_s16::<LANE>(b);
+    vqrdmulhq_s16(a, b)
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f16)"]
+#[doc = "Vector rounding saturating doubling multiply high by scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrdmulh, LANE = 1)
 )]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t {
-    unsafe { transmute(a) }
+pub fn vqrdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
+    static_assert_uimm_bits!(LANE, 1);
+    let b = vdupq_lane_s32::<LANE>(b);
+    vqrdmulhq_s32(a, b)
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f16)"]
+#[doc = "Vector rounding saturating doubling multiply high by scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrdmulh, LANE = 1)
 )]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t {
-    unsafe {
-        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: float32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vqrdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+    static_assert_uimm_bits!(LANE, 3);
+    let b = vdupq_laneq_s16::<LANE>(b);
+    vqrdmulhq_s16(a, b)
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f16)"]
+#[doc = "Vector rounding saturating doubling multiply high by scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrdmulh, LANE = 1)
 )]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t {
-    unsafe { transmute(a) }
+pub fn vqrdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
+    static_assert_uimm_bits!(LANE, 2);
+    let b = vdupq_laneq_s32::<LANE>(b);
+    vqrdmulhq_s32(a, b)
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f16)"]
+#[doc = "Vector saturating rounding doubling multiply high with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrdmulh)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t {
-    unsafe {
-        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: int8x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+pub fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t {
+    vqrdmulh_s16(a, vdup_n_s16(b))
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f16)"]
+#[doc = "Vector saturating rounding doubling multiply high with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrdmulh)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t {
-    unsafe { transmute(a) }
+pub fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t {
+    vqrdmulhq_s16(a, vdupq_n_s16(b))
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f16)"]
+#[doc = "Vector saturating rounding doubling multiply high with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrdmulh)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t {
-    unsafe {
-        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: int32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t {
+    vqrdmulh_s32(a, vdup_n_s32(b))
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f16)"]
+#[doc = "Vector saturating rounding doubling multiply high with scalar"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrdmulh)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t {
-    unsafe { transmute(a) }
+pub fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
+    vqrdmulhq_s32(a, vdupq_n_s32(b))
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f16)"]
+#[doc = "Signed saturating rounding doubling multiply returning high half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrdmulh)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t {
-    unsafe {
-        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        transmute(a)
+pub fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqrdmulh.v4i16"
+        )]
+        fn _vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t;
     }
+    unsafe { _vqrdmulh_s16(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"]
+#[doc = "Signed saturating rounding doubling multiply returning high half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrdmulh)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t {
-    unsafe { transmute(a) }
+pub fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqrdmulh.v8i16"
+        )]
+        fn _vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
+    }
+    unsafe { _vqrdmulhq_s16(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"]
+#[doc = "Signed saturating rounding doubling multiply returning high half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrdmulh)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t {
-    unsafe {
-        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: uint8x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+pub fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqrdmulh.v2i32"
+        )]
+        fn _vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t;
     }
+    unsafe { _vqrdmulh_s32(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f16)"]
+#[doc = "Signed saturating rounding doubling multiply returning high half"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrdmulh)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t {
-    unsafe { transmute(a) }
+pub fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqrdmulh.v4i32"
+        )]
+        fn _vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
+    }
+    unsafe { _vqrdmulhq_s32(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f16)"]
+#[doc = "Signed saturating rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrshl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t {
-    unsafe {
-        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: uint32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
+pub fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqrshl.v8i8"
+        )]
+        fn _vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t;
     }
+    unsafe { _vqrshl_s8(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f16)"]
+#[doc = "Signed saturating rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrshl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t {
-    unsafe { transmute(a) }
+pub fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqrshl.v16i8"
+        )]
+        fn _vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
+    }
+    unsafe { _vqrshlq_s8(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f16)"]
+#[doc = "Signed saturating rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrshl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t {
-    unsafe {
-        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        transmute(a)
+pub fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqrshl.v4i16"
+        )]
+        fn _vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t;
     }
+    unsafe { _vqrshl_s16(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"]
+#[doc = "Signed saturating rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrshl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t {
-    unsafe { transmute(a) }
+pub fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqrshl.v8i16"
+        )]
+        fn _vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
+    }
+    unsafe { _vqrshlq_s16(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"]
+#[doc = "Signed saturating rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrshl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t {
-    unsafe {
-        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: poly8x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+pub fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqrshl.v2i32"
+        )]
+        fn _vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t;
     }
+    unsafe { _vqrshl_s32(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f16)"]
+#[doc = "Signed saturating rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrshl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t {
-    unsafe { transmute(a) }
+pub fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqrshl.v4i32"
+        )]
+        fn _vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
+    }
+    unsafe { _vqrshlq_s32(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f16)"]
+#[doc = "Signed saturating rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrshl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t {
-    unsafe {
-        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: float32x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+pub fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v1i64")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqrshl.v1i64"
+        )]
+        fn _vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t;
     }
+    unsafe { _vqrshl_s64(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f16)"]
+#[doc = "Signed saturating rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(sqrshl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t {
-    unsafe { transmute(a) }
+pub fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.sqrshl.v2i64"
+        )]
+        fn _vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t;
+    }
+    unsafe { _vqrshlq_s64(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f16)"]
+#[doc = "Unsigned signed saturating rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(uqrshl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t {
-    unsafe {
-        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: int8x16_t = transmute(a);
-        simd_shuffle!(
-            ret_val,
-            ret_val,
-            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
-        )
+pub fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.uqrshl.v8i8"
+        )]
+        fn _vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
     }
+    unsafe { _vqrshl_u8(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f16)"]
+#[doc = "Unsigned signed saturating rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(uqrshl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t {
-    unsafe { transmute(a) }
+pub fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.uqrshl.v16i8"
+        )]
+        fn _vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
+    }
+    unsafe { _vqrshlq_u8(a, b) }
 }
-#[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f16)"]
+#[doc = "Unsigned signed saturating rounding shift left"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(uqrshl)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
+    stable(feature = "neon_intrinsics", since = "1.59.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-#[cfg(not(target_arch = "arm64ec"))]
-pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t {
-    unsafe {
-        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: int32x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
+pub fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")]
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
"arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v4i16" + )] + fn _vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; } + unsafe { _vqrshl_u16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f16)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f16)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { - unsafe { - let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v8i16" + )] + fn _vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; } + unsafe { _vqrshlq_u16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f16)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( 
target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { - unsafe { transmute(a) } +pub fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v2i32" + )] + fn _vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; + } + unsafe { _vqrshl_u32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f16)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { - unsafe { - let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v4i32" + )] + fn _vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; } + unsafe { _vqrshlq_u32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f16)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { - unsafe { transmute(a) } +pub fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + 
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v1i64" + )] + fn _vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; + } + unsafe { _vqrshl_u64(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f16)"] +#[doc = "Unsigned signed saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqrshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { - unsafe { - let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshl.v2i64" + )] + fn _vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; } + unsafe { _vqrshlq_u64(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f16)"] +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")] + fn _vqrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + unsafe { _vqrshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } +} +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")] + fn _vqrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } 
+ unsafe { _vqrshrn_n_s32(a, const { int32x4_t([-N; 4]) }) } +} +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")] + fn _vqrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + unsafe { _vqrshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } +} +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrn.v8i8" + )] + fn _vqrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + unsafe { _vqrshrn_n_s16(a, N) } +} +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrn.v4i16" + )] + fn _vqrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + unsafe { _vqrshrn_n_s32(a, N) } +} +#[doc = "Signed saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrn.v2i32" + )] + fn _vqrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + unsafe { _vqrshrn_n_s64(a, N) } +} +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" {
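// Editor's note: the unsigned narrowing variants below bind the vqrshiftnu
// LLVM intrinsics rather than the vqrshiftns ones used by the signed forms
// above; the shift vector is still built from -N, reinterpreted in the
// unsigned lane type.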
+ #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")] + fn _vqrshrn_n_u16(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t; + } + unsafe { _vqrshrn_n_u16(a, const { uint16x8_t([-N as u16; 8]) }) } +} +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")] + fn _vqrshrn_n_u32(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t; + } + unsafe { _vqrshrn_n_u32(a, const { uint32x4_t([-N as u32; 4]) }) } +} +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")] + fn _vqrshrn_n_u64(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t; + } + unsafe { _vqrshrn_n_u64(a, const { uint64x2_t([-N as u64; 2]) }) } +} +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshrn.v8i8" + )] + fn _vqrshrn_n_u16(a: uint16x8_t, n: i32) -> uint8x8_t; + } + unsafe { _vqrshrn_n_u16(a, N) } +} +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshrn.v4i16" + )] + fn _vqrshrn_n_u32(a: uint32x4_t, n: i32) -> uint16x4_t; + } + unsafe { _vqrshrn_n_u32(a, N) } +} +#[doc = "Unsigned saturating rounded shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
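// Editor's sketch (hypothetical values, not part of the patch): the rounding
// narrow adds 2^(N-1) before shifting and saturates to the narrow type, e.g.
//     let x = vdupq_n_u16(1000);
//     let r: uint8x8_t = vqrshrn_n_u16::<4>(x); // (1000 + 8) >> 4 = 63 per lane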
+#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqrshrn.v2i32" + )] + fn _vqrshrn_n_u64(a: uint64x2_t, n: i32) -> uint32x2_t; + } + unsafe { _vqrshrn_n_u64(a, N) } +} +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqrshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")] + fn _vqrshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t; + } + unsafe { _vqrshrun_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } +} +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqrshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")] + fn _vqrshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t; + } + unsafe { _vqrshrun_n_s32(a, const { int32x4_t([-N; 4]) }) } +} +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")] + fn _vqrshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t; + } + unsafe { _vqrshrun_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } +} +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqrshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrun.v8i8" + )] + fn _vqrshrun_n_s16(a: int16x8_t, n: i32) -> uint8x8_t; + } + unsafe { _vqrshrun_n_s16(a, N) } +} +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrun.v4i16" + )] + fn _vqrshrun_n_s32(a: int32x4_t, n: i32) -> uint16x4_t; + } + unsafe { _vqrshrun_n_s32(a, N) } +} +#[doc = "Signed saturating rounded shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqrshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqrshrun.v2i32" + )] + fn _vqrshrun_n_s64(a: int64x2_t, n: i32) -> uint32x2_t; + } + unsafe { _vqrshrun_n_s64(a, N) } +} +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { - unsafe { transmute(a) } +pub fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + vqshl_s8(a, vdup_n_s8(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f16)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] 
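// Editor's sketch (illustrative, not part of the patch): the vqshl_n_*
// wrappers below lower the constant shift onto the register form via vdup,
// so e.g. vqshl_n_s8::<2>(vdup_n_s8(100)) saturates to 127, since
// 100 << 2 = 400 exceeds i8::MAX.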
-#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { - unsafe { - let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + vqshlq_s8(a, vdupq_n_s8(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f16)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { - unsafe { transmute(a) } +pub fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + vqshl_s16(a, vdup_n_s16(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f16)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { - unsafe { - let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + vqshlq_s16(a, vdupq_n_s16(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f32)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { - unsafe { transmute(a) } +pub fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 5); + vqshl_s32(a, vdup_n_s32(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f32)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { - unsafe { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 5); + vqshlq_s32(a, vdupq_n_s32(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f32)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { - unsafe { transmute(a) } +pub fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { + static_assert_uimm_bits!(N, 6); + vqshl_s64(a, vdup_n_s64(N as 
_)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f32)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { - unsafe { - let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 6); + vqshlq_s64(a, vdupq_n_s64(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s8)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { - unsafe { transmute(a) } +pub fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + vqshl_u8(a, vdup_n_s8(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s8)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", 
since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { - unsafe { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + vqshlq_u8(a, vdupq_n_s8(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s8)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { - unsafe { transmute(a) } +pub fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + vqshl_u16(a, vdup_n_s16(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s8)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { - unsafe { - let a: int8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + vqshlq_u16(a, vdupq_n_s16(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { - unsafe { transmute(a) } +pub fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + vqshl_u32(a, vdup_n_s32(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { - unsafe { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + vqshlq_u32(a, vdupq_n_s32(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn 
vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { - unsafe { transmute(a) } +pub fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 6); + vqshl_u64(a, vdup_n_s64(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { - unsafe { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + vqshlq_u64(a, vdupq_n_s64(N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s64)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t { - unsafe { transmute(a) } +pub fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v8i8" + )] + fn _vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + unsafe { _vqshl_s8(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s64)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t { - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v16i8" + )] + fn _vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } + unsafe { _vqshlq_s8(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s64)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { - unsafe { transmute(a) } +pub fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v4i16" + )] + fn _vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + unsafe { _vqshl_s16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s64)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { - unsafe { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v8i16" + )] + fn _vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } + unsafe { _vqshlq_s16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u8)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { - unsafe { transmute(a) } +pub fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v2i32" + )] + fn _vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { _vqshl_s32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u8)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { - unsafe { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = 
"arm", link_name = "llvm.arm.neon.vqshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v4i32" + )] + fn _vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } + unsafe { _vqshlq_s32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u8)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { - unsafe { transmute(a) } +pub fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v1i64" + )] + fn _vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + unsafe { _vqshl_s64(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u8)"] +#[doc = "Signed saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { - unsafe { - let a: uint8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshl.v2i64" + )] + fn _vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } + unsafe { _vqshlq_s64(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { - unsafe { transmute(a) } +pub fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v8i8" + )] + fn _vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; + } + unsafe { _vqshl_u8(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { - unsafe { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v16i8" + )] + fn _vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; } + unsafe { _vqshlq_u8(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u32)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u32)"] -#[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { - unsafe { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v4i16" + )] + fn _vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; } + unsafe { _vqshl_u16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u64)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t { - unsafe { transmute(a) } +pub fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v8i16" + )] + fn _vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; + } + unsafe { _vqshlq_u16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u64)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t { - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v2i32" + )] + fn _vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; } + unsafe { _vqshl_u32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u64)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { - unsafe { transmute(a) } +pub fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v4i32" + )] + fn _vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; + } + unsafe { _vqshlq_u32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u64)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
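// Editor's note (illustrative): in the register forms the shift operand is a
// signed vector even for unsigned data, and negative lanes shift right, so
// e.g. vqshl_u64(vdup_n_u64(8), vdup_n_s64(-2)) yields 2 in each lane.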
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { - unsafe { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v1i64" + )] + fn _vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; } + unsafe { _vqshl_u64(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p8)"] +#[doc = "Unsigned saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { - unsafe { transmute(a) } +pub fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshl.v2i64" + )] + fn _vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; + } + unsafe { _vqshlq_u64(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p8)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { 
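// Editor's example (not part of the patch): vqshlu_n_* takes signed input but
// returns an unsigned result, so negative lanes saturate to zero:
//     vqshlu_n_s8::<1>(vdup_n_s8(-5)) // all lanes 0
//     vqshlu_n_s8::<1>(vdup_n_s8(70)) // all lanes 140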
- unsafe { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")] + fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> uint8x8_t; } + unsafe { _vqshlu_n_s8(a, const { int8x8_t([N as i8; 8]) }) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p8)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { - unsafe { transmute(a) } +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")] + fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> uint8x16_t; + } + unsafe { _vqshluq_n_s8(a, const { int8x16_t([N as i8; 16]) }) } }
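// Editorial sketch, not part of the patch: how the const-generic form above is
// used. vqshlu_n_s8::<N> shifts each signed lane left by N and saturates into
// the unsigned range, so 100 << 2 = 400 clamps to u8::MAX. Assumes an aarch64
// target, where the same public signature is declared further down.
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
fn vqshlu_n_s8_demo() -> u8 {
    use core::arch::aarch64::{vdup_n_s8, vget_lane_u8, vqshlu_n_s8};
    let a = vdup_n_s8(100); // every lane = 100
    let r = vqshlu_n_s8::<2>(a); // 100 << 2 = 400 saturates to 255 per lane
    vget_lane_u8::<0>(r)
}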
-#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p8)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { - unsafe { - let a: poly8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")] + fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> uint16x4_t; } + unsafe { _vqshlu_n_s16(a, const { int16x4_t([N as i16; 4]) }) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f16)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { - unsafe { transmute(a) } +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")] + fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> uint16x8_t; + } + unsafe { _vqshluq_n_s16(a, const { int16x8_t([N as i16; 8]) }) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f16)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { - unsafe { transmute(a) } +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")] + fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> uint32x2_t; + } + unsafe { _vqshlu_n_s32(a, const { int32x2_t([N; 2]) }) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f16)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { - unsafe { transmute(a) } +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")] + fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> uint32x4_t; + } + unsafe { _vqshluq_n_s32(a, const { int32x4_t([N; 4]) }) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f16)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { - unsafe { transmute(a) } +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 6); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")] + fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> uint64x1_t; + } + unsafe { _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }) } }
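// Editorial sketch, not part of the patch: a plain-Rust model of what one lane
// of the vqshlu_n_* family above computes. The static asserts guarantee the
// shift fits the lane width (0..=63 for the 64-bit case), and the "unsigned"
// in the name means the saturation bounds are those of the unsigned result.
fn vqshlu_lane_model(x: i64, n: u32) -> u64 {
    if x < 0 {
        0 // negative inputs saturate to 0 in the unsigned result
    } else if x != 0 && n > (x as u64).leading_zeros() {
        u64::MAX // significant bits would be shifted out: saturate
    } else {
        (x as u64) << n
    }
}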
-#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f16)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { - unsafe { transmute(a) } +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")] + fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> uint64x2_t; + } + unsafe { _vqshluq_n_s64(a, const { int64x2_t([N as i64; 2]) }) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f16)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { - unsafe { transmute(a) } +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v8i8" + )] + fn _vqshlu_n_s8(a: int8x8_t, n: int8x8_t) -> uint8x8_t; + } + unsafe { _vqshlu_n_s8(a, const { int8x8_t([N as i8; 8]) }) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s16)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { - unsafe { transmute(a) } +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn 
vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v16i8" + )] + fn _vqshluq_n_s8(a: int8x16_t, n: int8x16_t) -> uint8x16_t; + } + unsafe { _vqshluq_n_s8(a, const { int8x16_t([N as i8; 16]) }) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s16)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { - unsafe { transmute(a) } +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v4i16" + )] + fn _vqshlu_n_s16(a: int16x4_t, n: int16x4_t) -> uint16x4_t; + } + unsafe { _vqshlu_n_s16(a, const { int16x4_t([N as i16; 4]) }) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u16)"] +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v8i16" + )] + fn _vqshluq_n_s16(a: int16x8_t, n: int16x8_t) -> uint16x8_t; + } + unsafe { _vqshluq_n_s16(a, const { int16x8_t([N as i16; 8]) }) } +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v2i32" + )] + fn _vqshlu_n_s32(a: int32x2_t, n: int32x2_t) -> uint32x2_t; + } + unsafe { _vqshlu_n_s32(a, const { int32x2_t([N; 2]) }) } +}
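// Editorial note, not part of the patch: the `const { int32x2_t([N; 2]) }`
// splat above materializes the immediate as a compile-time vector, because the
// underlying LLVM intrinsics take a per-lane shift vector rather than a scalar.
// On the caller's side the immediate stays a const generic, e.g. (assumed
// aarch64 target):
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
fn saturating_double(a: core::arch::aarch64::int32x2_t) -> core::arch::aarch64::uint32x2_t {
    // Shift left by 1 = doubling, saturated into unsigned 32-bit lanes.
    core::arch::aarch64::vqshlu_n_s32::<1>(a)
}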
+#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v4i32" + )] + fn _vqshluq_n_s32(a: int32x4_t, n: int32x4_t) -> uint32x4_t; + } + unsafe { _vqshluq_n_s32(a, const { int32x4_t([N; 4]) }) } +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 6); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v1i64" + )] + fn _vqshlu_n_s64(a: int64x1_t, n: int64x1_t) -> uint64x1_t; + } + unsafe { _vqshlu_n_s64(a, const { int64x1_t([N as i64]) }) } +} +#[doc = "Signed saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshlu, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshlu.v2i64" + )] + fn _vqshluq_n_s64(a: int64x2_t, n: int64x2_t) -> uint64x2_t; + } + unsafe { _vqshluq_n_s64(a, const { int64x2_t([N as i64; 2]) }) } +} +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")] + fn _vqshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; + } + unsafe { _vqshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } +}
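// Editorial note, not part of the patch: on this arm (v7) path a right shift by
// N is encoded as a left shift by -N, hence the `[-N as i16; 8]` splat; the
// llvm.arm.neon.vqshiftns intrinsics only have a "shift left" form. Per lane,
// vqshrn_n_s16 behaves roughly like this scalar model:
fn vqshrn_lane_model(x: i16, n: u32) -> i8 {
    // Arithmetic shift right, then saturate into the narrower signed type.
    (x >> n).clamp(i8::MIN as i16, i8::MAX as i16) as i8
}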
+#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")] + fn _vqshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + unsafe { _vqshrn_n_s32(a, const { int32x4_t([-N; 4]) }) } +} +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")] + fn _vqshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + unsafe { _vqshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } +} +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v8i8" + )] + fn _vqshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + unsafe { _vqshrn_n_s16(a, N) } +} +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v4i16" + )] + fn _vqshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + unsafe { _vqshrn_n_s32(a, N) } +} +#[doc = "Signed saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrn.v2i32" + )] + fn _vqshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + unsafe { _vqshrn_n_s64(a, N) } +}
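// Editorial sketch, not part of the patch: unlike the arm path, the aarch64
// forms above pass N straight through as an i32 to llvm.aarch64.neon.sqshrn.*.
// Usage is identical on both paths (assumed aarch64 target):
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
fn narrow_demo() -> i8 {
    use core::arch::aarch64::{vdupq_n_s16, vget_lane_s8, vqshrn_n_s16};
    let a = vdupq_n_s16(1000);
    let r = vqshrn_n_s16::<2>(a); // 1000 >> 2 = 250, saturates to 127 in i8
    vget_lane_s8::<0>(r)
}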
+#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")] + fn _vqshrn_n_u16(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t; + } + unsafe { _vqshrn_n_u16(a, const { uint16x8_t([-N as u16; 8]) }) } +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")] + fn _vqshrn_n_u32(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t; + } + unsafe { _vqshrn_n_u32(a, const { uint32x4_t([-N as u32; 4]) }) } +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")] + fn _vqshrn_n_u64(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t; + } + unsafe { _vqshrn_n_u64(a, const { uint64x2_t([-N as u64; 2]) }) } +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v8i8" + )] + fn _vqshrn_n_u16(a: uint16x8_t, n: i32) -> uint8x8_t; + } + unsafe { _vqshrn_n_u16(a, N) } +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v4i16" + )] + fn _vqshrn_n_u32(a: uint32x4_t, n: i32) -> uint16x4_t; + } + unsafe { _vqshrn_n_u32(a, N) } +} +#[doc = "Unsigned saturating shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(uqshrn, N = 2))] +#[rustc_legacy_const_generics(1)] 
+#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.uqshrn.v2i32" + )] + fn _vqshrn_n_u64(a: uint64x2_t, n: i32) -> uint32x2_t; + } + unsafe { _vqshrn_n_u64(a, N) } +} +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")] + fn _vqshrun_n_s16(a: int16x8_t, n: int16x8_t) -> uint8x8_t; + } + unsafe { _vqshrun_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } +} +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")] + fn _vqshrun_n_s32(a: int32x4_t, n: int32x4_t) -> uint16x4_t; + } + unsafe { _vqshrun_n_s32(a, const { int32x4_t([-N; 4]) }) } +} +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vqshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")] + fn _vqshrun_n_s64(a: int64x2_t, n: int64x2_t) -> uint32x2_t; + } + unsafe { _vqshrun_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } +} +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrun.v8i8" + )] + fn _vqshrun_n_s16(a: int16x8_t, n: i32) -> uint8x8_t; + } + unsafe { _vqshrun_n_s16(a, N) } +}
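// Editorial sketch, not part of the patch: vqshrun_n_* ("shift right unsigned
// narrow") takes signed input but produces an unsigned result, so negative
// lanes clamp to 0 and oversized positive lanes clamp to the lane maximum
// (assumed aarch64 target):
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
fn narrow_unsigned_demo() -> (u8, u8) {
    use core::arch::aarch64::{vcombine_s16, vdup_n_s16, vget_lane_u8, vqshrun_n_s16};
    let a = vcombine_s16(vdup_n_s16(-100), vdup_n_s16(20_000));
    let r = vqshrun_n_s16::<4>(a); // -100 >> 4 clamps to 0; 20_000 >> 4 = 1250 clamps to 255
    (vget_lane_u8::<0>(r), vget_lane_u8::<7>(r))
}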
+#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrun.v4i16" + )] + fn _vqshrun_n_s32(a: int32x4_t, n: i32) -> uint16x4_t; + } + unsafe { _vqshrun_n_s32(a, N) } +} +#[doc = "Signed saturating shift right unsigned narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(sqshrun, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vqshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.sqshrun.v2i32" + )] + fn _vqshrun_n_s64(a: int64x2_t, n: i32) -> uint32x2_t; + } + unsafe { _vqshrun_n_s64(a, N) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t { - unsafe { transmute(a) } +pub fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u16)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t { - unsafe { transmute(a) } +pub fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p16)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t { - unsafe { transmute(a) } +pub fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p16)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { - unsafe { transmute(a) } +pub fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -39564,18 +41251,18 @@ pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { - unsafe { transmute(a) } +pub fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] +#[doc = 
"Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -39585,18 +41272,18 @@ pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { - unsafe { transmute(a) } +pub fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -39606,18 +41293,18 @@ pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { - unsafe { transmute(a) } +pub fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(sqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -39627,18 +41314,18 @@ pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { - unsafe { transmute(a) } +pub fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -39648,18 +41335,18 @@ pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { - unsafe { transmute(a) } +pub fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -39669,18 +41356,18 @@ pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { - unsafe { transmute(a) } +pub fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -39690,18 +41377,18 @@ pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { - unsafe { transmute(a) } +pub fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -39711,18 +41398,18 @@ pub fn vreinterpretq_u8_s8(a: 
int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { - unsafe { transmute(a) } +pub fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -39732,18 +41419,18 @@ pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { - unsafe { transmute(a) } +pub fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -39753,18 +41440,18 @@ pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { - unsafe { transmute(a) } +pub fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -39774,18 +41461,18 @@ pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { - unsafe { transmute(a) } +pub fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { 
simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(uqsub) )] #[cfg_attr( not(target_arch = "arm"), @@ -39795,18 +41482,22 @@ pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { - unsafe { transmute(a) } +pub fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { simd_saturating_sub(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(raddhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -39816,18 +41507,23 @@ pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { - unsafe { transmute(a) } +pub fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + let x = vraddhn_s16(b, c); + vcombine_s8(a, x) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(raddhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -39837,18 +41533,23 @@ pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { - unsafe { transmute(a) } +pub fn vraddhn_high_s32(a: int16x4_t, b: 
int32x4_t, c: int32x4_t) -> int16x8_t { + let x = vraddhn_s32(b, c); + vcombine_s16(a, x) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(raddhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -39858,18 +41559,23 @@ pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { - unsafe { transmute(a) } +pub fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + let x = vraddhn_s64(b, c); + vcombine_s32(a, x) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(raddhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -39879,18 +41585,25 @@ pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { - unsafe { transmute(a) } +pub fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + unsafe { + let x: uint8x8_t = transmute(vraddhn_s16(transmute(b), transmute(c))); + vcombine_u8(a, x) + } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(raddhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ 
-39900,18 +41613,25 @@ pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { - unsafe { transmute(a) } +pub fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + unsafe { + let x: uint16x4_t = transmute(vraddhn_s32(transmute(b), transmute(c))); + vcombine_u16(a, x) + } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] +#[doc = "Rounding Add returning High Narrow (high half)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(raddhn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -39921,18 +41641,21 @@ pub fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { - unsafe { transmute(a) } +pub fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + unsafe { + let x: uint32x2_t = transmute(vraddhn_s64(transmute(b), transmute(c))); + vcombine_u32(a, x) + } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -39942,18 +41665,26 @@ pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { - unsafe { transmute(a) } +pub fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.raddhn.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v8i8")] + fn _vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; + } + unsafe { _vraddhn_s16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s32)"] #[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -39963,18 +41694,26 @@ pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { - unsafe { transmute(a) } +pub fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.raddhn.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v4i16")] + fn _vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; + } + unsafe { _vraddhn_s32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -39984,18 +41723,26 @@ pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { - unsafe { transmute(a) } +pub fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.raddhn.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vraddhn.v2i32")] + fn _vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; + } + unsafe { _vraddhn_s64(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -40005,18 +41752,18 @@ pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { - unsafe { transmute(a) } +pub fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + unsafe { transmute(vraddhn_s16(transmute(a), transmute(b))) } } -#[doc = "Vector 
reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -40026,18 +41773,18 @@ pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { - unsafe { transmute(a) } +pub fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + unsafe { transmute(vraddhn_s32(transmute(a), transmute(b))) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] +#[doc = "Rounding Add returning High Narrow."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(raddhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -40047,60 +41794,78 @@ pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { - unsafe { transmute(a) } +pub fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + unsafe { transmute(vraddhn_s64(transmute(a), transmute(b))) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frecpe) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { - unsafe { transmute(a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrecpe_f16(a: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name 
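Editor's note: the unsigned wrappers (`vraddhn_u16/u32/u64`) get away with transmuting through the signed implementation because the rounded high half depends only on the bit pattern of the wrapping sum, which two's complement makes identical for both signednesses. A scalar check of that claim (illustrative, 16-to-8-bit case):

```rust
// Bits 15:8 of (a + b + 0x80) agree whether the 16-bit operands are read as
// signed or unsigned: the two wide sums differ only by multiples of 2^16.
fn main() {
    for &(a, b) in &[(0xFFF0u16, 0x0020u16), (0x8000, 0x8000), (0x7FFF, 0x0001)] {
        let as_unsigned = ((a as u32 + b as u32 + 0x80) >> 8) as u8;
        let as_signed = ((a as i16 as i32 + b as i16 as i32 + 0x80) >> 8) as u8;
        assert_eq!(as_unsigned, as_signed);
    }
}
```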
= "llvm.arm.neon.vrecpe.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecpe.v4f16" + )] + fn _vrecpe_f16(a: float16x4_t) -> float16x4_t; + } + unsafe { _vrecpe_f16(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frecpe) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { - unsafe { transmute(a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrecpeq_f16(a: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v8f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecpe.v8f16" + )] + fn _vrecpeq_f16(a: float16x8_t) -> float16x8_t; + } + unsafe { _vrecpeq_f16(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frecpe) )] #[cfg_attr( not(target_arch = "arm"), @@ -40110,18 +41875,26 @@ pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { - unsafe { transmute(a) } +pub fn vrecpe_f32(a: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecpe.v2f32" + )] + fn _vrecpe_f32(a: float32x2_t) -> float32x2_t; + } + unsafe { _vrecpe_f32(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] +#[doc = "Reciprocal estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frecpe) )] #[cfg_attr( not(target_arch = "arm"), @@ -40131,18 +41904,26 @@ pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { - unsafe { transmute(a) } +pub fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecpe.v4f32" + )] + fn _vrecpeq_f32(a: float32x4_t) -> float32x4_t; + } + unsafe { _vrecpeq_f32(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] +#[doc = "Unsigned reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urecpe) )] #[cfg_attr( not(target_arch = "arm"), @@ -40152,18 +41933,26 @@ pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { - unsafe { transmute(a) } +pub fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urecpe.v2i32" + )] + fn _vrecpe_u32(a: uint32x2_t) -> uint32x2_t; + } + unsafe { _vrecpe_u32(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] +#[doc = "Unsigned reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urecpe) )] #[cfg_attr( not(target_arch = "arm"), @@ -40173,60 +41962,86 @@ pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { - unsafe { transmute(a) } +pub fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = 
"llvm.aarch64.neon.urecpe.v4i32" + )] + fn _vrecpeq_u32(a: uint32x4_t) -> uint32x4_t; + } + unsafe { _vrecpeq_u32(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frecps) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { - unsafe { transmute(a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrecps_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecps.v4f16" + )] + fn _vrecps_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + } + unsafe { _vrecps_f16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frecps) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { - unsafe { transmute(a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrecpsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v8f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecps.v8f16" + )] + fn _vrecpsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + } + unsafe { _vrecpsq_f16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frecps) )] #[cfg_attr( not(target_arch = "arm"), @@ -40236,18 +42051,26 @@ pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { - unsafe { transmute(a) } +pub fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecps.v2f32" + )] + fn _vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + } + unsafe { _vrecps_f32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] +#[doc = "Floating-point reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frecps) )] #[cfg_attr( not(target_arch = "arm"), @@ -40257,11 +42080,19 @@ pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { - unsafe { transmute(a) } +pub fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frecps.v4f32" + )] + fn _vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { _vrecpsq_f32(a, b) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40272,17 +42103,18 @@ pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
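Editor's note: `vrecps_f32` (FRECPS) computes `2.0 - a*b` per lane, which is exactly the correction factor for a Newton-Raphson reciprocal step; chained with the estimate above, two steps recover roughly full single precision. A sketch of the usual idiom, written scalar-in-lane-0 for brevity (my code, not stdarch's):

```rust
#[cfg(target_arch = "aarch64")]
fn reciprocal(x: f32) -> f32 {
    use core::arch::aarch64::*;
    unsafe {
        let d = vdup_n_f32(x);
        let mut est = vrecpe_f32(d); // ~8-bit estimate
        est = vmul_f32(est, vrecps_f32(d, est)); // est *= 2 - d*est
        est = vmul_f32(est, vrecps_f32(d, est)); // second step: ~24 bits
        vget_lane_f32::<0>(est)
    }
}
```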
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40293,17 +42125,18 @@ pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40314,17 +42147,18 @@ pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40335,17 +42169,18 @@ pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40356,17 +42191,18 @@ pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { 
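Editor's note: every `vreinterpret*` in this stretch has the same `unsafe { transmute(a) }` body: a zero-cost bit cast, which the `assert_instr(nop)` attributes pin down as generating no instruction. The long-stable f32/u32 pair makes the bit preservation easy to observe (illustrative):

```rust
#[cfg(target_arch = "aarch64")]
fn demo_reinterpret() {
    use core::arch::aarch64::*;
    unsafe {
        // load the IEEE-754 bits of 1.5 into u32 lanes, then bit-cast to f32
        let bits = vdup_n_u32(1.5f32.to_bits());
        let floats = vreinterpret_f32_u32(bits);
        assert_eq!(vget_lane_f32::<0>(floats), 1.5);
    }
}
```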
+#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40377,21 +42213,21 @@ pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -40406,15 +42242,14 @@ pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { +pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -40429,18 +42264,14 @@ pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { - unsafe { - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -40455,15 +42286,14 @@ pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { +pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -40478,18 +42308,14 @@ pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { - unsafe { - let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -40504,15 +42330,14 @@ pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { +pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -40527,18 +42352,14 @@ pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { - unsafe { - let a: float16x8_t 
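Editor's note: the new `float16x4_t`/`float16x8_t` casts sit behind `stable(feature = "stdarch_neon_fp16", ...)` and are compiled out on arm64ec, so exercising them today means a nightly toolchain with the relevant feature gates enabled (the exact gate names are my assumption from the attributes in this diff, and `f16` literals need the unstable `f16` type). A hedged sketch:

```rust
// Nightly-only sketch: reinterpreting f16 lanes to u16 lanes preserves the
// IEEE half-precision bit pattern (1.0 in f16 is 0x3C00).
#[cfg(target_arch = "aarch64")]
fn demo_f16_bits() {
    use core::arch::aarch64::*;
    unsafe {
        let ones = vdup_n_f16(1.0); // assumes the unstable f16 NEON API
        let bits = vreinterpret_u16_f16(ones);
        assert_eq!(vget_lane_u16::<0>(bits), 0x3C00);
    }
}
```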
= simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -40553,15 +42374,14 @@ pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { +pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -40576,19 +42396,14 @@ pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { - unsafe { - let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -40603,15 +42418,14 @@ pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { +pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p64)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -40626,18 +42440,14 @@ pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { - unsafe { - let ret_val: float16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -40652,15 +42462,14 @@ pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { +pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -40675,17 +42484,12 @@ pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { - unsafe { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ 
-40695,19 +42499,19 @@ pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -40717,22 +42521,19 @@ pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { - unsafe { - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -40742,19 +42543,19 @@ pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -40764,23 +42565,19 @@ pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_f32(a: 
float32x2_t) -> int8x8_t { - unsafe { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -40790,19 +42587,19 @@ pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -40812,23 +42609,19 @@ pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { - unsafe { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -40838,19 +42631,19 @@ pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { unsafe { 
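Editor's note: for contrast with the plain-`transmute` additions, the big-endian bodies being deleted here (for example the second `vreinterpret_s8_f32` just above) shuffled lanes on both sides of the cast. Modeled on plain arrays; this is my paraphrase of the removed `simd_shuffle!` calls, not stdarch code:

```rust
// What the removed big-endian vreinterpret_s8_f32 did: reverse the two f32
// lanes, bit-cast, then reverse the eight i8 lanes, so lane indices match
// the little-endian result.
fn s8_from_f32_big_endian_model(a: [f32; 2]) -> [i8; 8] {
    let reversed = [a[1], a[0]]; // simd_shuffle!(a, a, [1, 0])
    let bytes: [i8; 8] = unsafe { core::mem::transmute(reversed) };
    core::array::from_fn(|i| bytes[7 - i]) // simd_shuffle!(ret, ret, [7, .., 0])
}
```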
transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -40860,22 +42653,19 @@ pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { - unsafe { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -40885,19 +42675,19 @@ pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -40907,23 +42697,19 @@ pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { - unsafe { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -40933,19 +42719,19 @@ pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -40955,23 +42741,19 @@ pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { - unsafe { - let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -40981,19 +42763,19 @@ pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
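Editor's note: one reading aid for this long run of casts: the generated names follow `vreinterpret[q]_<dst>_<src>`, and each cast has an inverse elsewhere in the file, so pairs compose to the identity. Trivial, but occasionally useful as a smoke test (illustrative):

```rust
#[cfg(target_arch = "aarch64")]
fn roundtrip(x: u32) -> u32 {
    use core::arch::aarch64::*;
    unsafe {
        let v = vdup_n_u32(x);
        // u32 -> s32 -> u32 is a bit-for-bit identity
        vget_lane_u32::<0>(vreinterpret_u32_s32(vreinterpret_s32_u32(v)))
    }
}
```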
assert_instr(nop))]
@@ -41003,22 +42785,19 @@ pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t {
-    unsafe {
-        let a: float32x2_t = simd_shuffle!(a, a, [1, 0]);
-        transmute(a)
-    }
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41028,19 +42807,19 @@ pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t {
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41050,23 +42829,19 @@ pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t {
-    unsafe {
-        let a: float32x2_t = simd_shuffle!(a, a, [1, 0]);
-        let ret_val: poly8x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41076,19 +42851,19 @@ pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t {
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41098,23 +42873,19 @@ pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t {
-    unsafe {
-        let a: float32x2_t = simd_shuffle!(a, a, [1, 0]);
-        let ret_val: poly16x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41124,19 +42895,19 @@ pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 {
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41146,22 +42917,19 @@ pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 {
-    unsafe {
-        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        transmute(a)
-    }
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41171,19 +42939,19 @@ pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t {
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41193,27 +42961,19 @@ pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t {
-    unsafe {
-        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: int8x16_t = transmute(a);
-        simd_shuffle!(
-            ret_val,
-            ret_val,
-            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
-        )
-    }
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41223,19 +42983,19 @@ pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t {
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41245,23 +43005,19 @@ pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t {
-    unsafe {
-        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: int16x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41271,19 +43027,19 @@ pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t {
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41293,23 +43049,19 @@ pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t {
-    unsafe {
-        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: int64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41325,13 +43077,12 @@ pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t {
+pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41347,21 +43098,12 @@ pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t {
-    unsafe {
-        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: uint8x16_t = transmute(a);
-        simd_shuffle!(
-            ret_val,
-            ret_val,
-            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
-        )
-    }
+pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41377,13 +43119,12 @@ pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t {
+pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41399,17 +43140,12 @@ pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t {
-    unsafe {
-        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: uint16x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
-}
+pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t {
+    unsafe { transmute(a) }
+}
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41425,13 +43161,12 @@ pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t {
+pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41447,17 +43182,12 @@ pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t {
-    unsafe {
-        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: uint64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41473,13 +43203,12 @@ pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t {
+pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41495,21 +43224,12 @@ pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t {
-    unsafe {
-        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: poly8x16_t = transmute(a);
-        simd_shuffle!(
-            ret_val,
-            ret_val,
-            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
-        )
-    }
+pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41525,13 +43245,12 @@ pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t {
+pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41547,17 +43266,12 @@ pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t {
-    unsafe {
-        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: poly16x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41573,13 +43287,12 @@ pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t {
+pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41595,17 +43308,12 @@ pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t {
-    unsafe {
-        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: float32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41621,13 +43329,12 @@ pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t {
+pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t {
    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41643,17 +43350,12 @@ pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t {
-    unsafe {
-        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: int16x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
+pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41669,13 +43371,12 @@ pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t {
+pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41691,17 +43392,12 @@ pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t {
-    unsafe {
-        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: int32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41717,13 +43413,12 @@ pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t {
+pub fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41739,16 +43434,12 @@ pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t {
-    unsafe {
-        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        transmute(a)
-    }
+pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41764,13 +43455,12 @@ pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t {
+pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41786,17 +43476,12 @@ pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t {
-    unsafe {
-        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: uint16x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
+pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41812,13 +43497,12 @@ pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t {
+pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41834,17 +43518,12 @@ pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t {
-    unsafe {
-        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: uint32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41860,13 +43539,12 @@ pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t {
+pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41882,16 +43560,12 @@ pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t {
-    unsafe {
-        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        transmute(a)
-    }
+pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41907,13 +43581,12 @@ pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t {
+pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41929,17 +43602,12 @@ pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t {
-    unsafe {
-        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: poly16x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
+pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41955,13 +43623,12 @@ pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t {
+pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -41977,16 +43644,12 @@ pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t {
-    unsafe {
-        let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
-        transmute(a)
-    }
+pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42002,13 +43665,12 @@ pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 {
+pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42024,17 +43686,12 @@ pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 {
-    unsafe {
-        let a: int8x16_t =
-            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
-        transmute(a)
-    }
+pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42050,13 +43707,12 @@ pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t {
+pub fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42072,18 +43728,12 @@ pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t {
-    unsafe {
-        let a: int8x16_t =
-            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: float32x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
+pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42099,13 +43749,12 @@ pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t {
+pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42121,18 +43770,12 @@ pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t {
-    unsafe {
-        let a: int8x16_t =
-            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: int16x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42148,13 +43791,12 @@ pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t {
+pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42170,18 +43812,12 @@ pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t {
-    unsafe {
-        let a: int8x16_t =
-            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: int32x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
+pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42197,13 +43833,12 @@ pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t {
+pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42219,18 +43854,12 @@ pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t {
-    unsafe {
-        let a: int8x16_t =
-            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: int64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42246,13 +43875,12 @@ pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t {
+pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42268,20 +43896,14 @@ pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t {
-    unsafe {
-        let a: int8x16_t =
-            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: uint16x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p128)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
@@ -42289,21 +43911,21 @@ pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t {
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
@@ -42311,26 +43933,21 @@ pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t {
-    unsafe {
-        let a: int8x16_t =
-            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: uint32x4_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
-    }
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
@@ -42338,21 +43955,21 @@ pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t {
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
@@ -42360,26 +43977,21 @@ pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t {
-    unsafe {
-        let a: int8x16_t =
-            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: uint64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p64)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
@@ -42387,21 +43999,21 @@ pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t {
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p64)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
@@ -42409,24 +44021,19 @@ pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t {
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t {
-    unsafe {
-        let a: int8x16_t =
-            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: poly16x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42442,13 +44049,12 @@ pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t {
+pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42464,18 +44070,12 @@ pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t {
-    unsafe {
-        let a: int8x16_t =
-            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
-        let ret_val: poly64x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42491,13 +44091,12 @@ pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t {
+pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42513,17 +44112,12 @@ pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t {
-    unsafe {
-        let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: float32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42539,13 +44133,12 @@ pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t {
+pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42561,17 +44154,12 @@ pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t {
-    unsafe {
-        let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: int8x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42587,13 +44175,12 @@ pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t {
+pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t {
    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42609,17 +44196,12 @@ pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t {
-    unsafe {
-        let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: int32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42635,13 +44217,12 @@ pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t {
+pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42657,16 +44238,12 @@ pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t {
-    unsafe {
-        let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        transmute(a)
-    }
+pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42682,13 +44259,12 @@ pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t {
+pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42704,17 +44280,12 @@ pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t {
-    unsafe {
-        let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: uint8x8_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
-    }
+pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42730,13 +44301,12 @@ pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t {
+pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t {
     unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"]
 #[inline]
-#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42752,17 +44322,12 @@ pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t {
-    unsafe {
-        let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
-        let ret_val: uint32x2_t = transmute(a);
-        simd_shuffle!(ret_val, ret_val, [1, 0])
-    }
+pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t {
+    unsafe { transmute(a) }
 }
 #[doc = "Vector reinterpret cast operation"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"]
 #[inline]
-#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
@@ -42778,13 +44343,12 @@ pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn
vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { +pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -42800,16 +44364,12 @@ pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { - unsafe { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -42825,13 +44385,12 @@ pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { +pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -42847,17 +44406,12 @@ pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { - unsafe { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -42873,13 +44427,12 @@ pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { +pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -42895,16 +44448,12 @@ pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { - unsafe { - let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -42920,13 +44469,12 @@ pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { +pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -42942,16 +44490,12 @@ pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { - unsafe { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -42967,13 +44511,12 @@ pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { +pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { unsafe { 
transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -42989,17 +44532,12 @@ pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { - unsafe { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43015,13 +44553,12 @@ pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { +pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43037,21 +44574,12 @@ pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { - unsafe { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43067,13 +44595,12 @@ pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { +pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43089,17 +44616,12 @@ pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { - unsafe { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43115,13 +44637,12 @@ pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { +pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43137,17 +44658,12 @@ pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { - unsafe { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43163,13 +44679,12 @@ pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { +pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43185,21 +44700,12 @@ pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { - unsafe { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43215,13 +44721,12 @@ pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { +pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43237,17 +44742,12 @@ pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { - unsafe { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ 
-43263,13 +44763,12 @@ pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { +pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43285,17 +44784,12 @@ pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { - unsafe { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43311,13 +44805,12 @@ pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { +pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43333,21 +44826,12 @@ pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { - unsafe { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43363,13 +44847,12 @@ pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { +pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43385,17 +44868,12 @@ pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { - unsafe { - let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43411,13 +44889,12 @@ pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { +pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43433,17 +44910,12 @@ pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { - unsafe { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] #[inline] -#[cfg(target_endian = 
"little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43459,13 +44931,12 @@ pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { +pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43481,17 +44952,12 @@ pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { - unsafe { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43507,13 +44973,12 @@ pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { +pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43529,16 +44994,12 @@ pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { - unsafe { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) - } +pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43554,13 +45015,12 @@ pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { +pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43576,17 +45036,12 @@ pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { - unsafe { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43602,13 +45057,12 @@ pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { +pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43624,17 +45078,12 @@ pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { - unsafe { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] #[inline] -#[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43650,13 +45099,12 @@ pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { +pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43672,16 +45120,12 @@ pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { - unsafe { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) - } +pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43697,13 +45141,12 @@ pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { +pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43719,17 +45162,12 @@ pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { - unsafe { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43745,13 +45183,12 @@ pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { +pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43767,17 +45204,12 @@ pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { - unsafe { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43793,13 +45225,12 @@ pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { +pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43815,16 +45246,12 @@ pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { - unsafe { - let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) - } +pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43840,13 +45267,12 @@ pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { +pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43862,16 +45288,12 @@ pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { - unsafe { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43887,13 +45309,12 @@ pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { +pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43909,21 +45330,12 @@ pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { - unsafe { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43939,13 +45351,12 @@ pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { +pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43961,17 +45372,12 @@ pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { - unsafe { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -43987,13 +45393,12 @@ pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { +pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44009,17 +45414,12 @@ pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { - unsafe { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable 
= "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44035,13 +45435,12 @@ pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { +pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44057,21 +45456,12 @@ pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { - unsafe { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44087,13 +45477,12 @@ pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { +pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44109,17 +45498,12 @@ pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { - unsafe { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44135,13 +45519,12 @@ pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { +pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44158,16 +45541,11 @@ pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { - unsafe { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44187,9 +45565,8 @@ pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44205,21 +45582,12 @@ pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { - unsafe { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44235,13 +45603,12 @@ pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { +pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44257,17 +45624,12 @@ pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { - unsafe { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44283,13 +45645,12 @@ pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { +pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44305,17 +45666,12 @@ pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { - unsafe { - let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44331,13 +45687,12 @@ pub fn vreinterpretq_p64_s32(a: int32x4_t) 
-> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { +pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44353,16 +45708,12 @@ pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44378,13 +45729,12 @@ pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { +pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44400,16 +45750,12 @@ pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44425,13 +45771,12 @@ pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { +pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44447,16 +45792,12 @@ pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44472,13 +45813,12 @@ pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { +pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44494,16 +45834,12 @@ pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44519,13 +45855,12 @@ pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_s64(a: 
int64x1_t) -> uint8x8_t { +pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44541,16 +45876,12 @@ pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44566,13 +45897,12 @@ pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { +pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44588,16 +45918,12 @@ pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44613,13 +45939,12 @@ pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { +pub fn vreinterpretq_u16_s64(a: 
int64x2_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44635,16 +45960,12 @@ pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44660,13 +45981,12 @@ pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { +pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44682,16 +46002,12 @@ pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44707,13 +46023,12 @@ pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { +pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector 
reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44729,16 +46044,12 @@ pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44754,13 +46065,12 @@ pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { +pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44776,16 +46086,12 @@ pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { - unsafe { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) - } +pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44801,13 +46107,12 @@ pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { +pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44823,17 +46128,12 @@ pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { - unsafe { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44849,13 +46149,12 @@ pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { +pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44871,21 +46170,12 @@ pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { - unsafe { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44901,13 +46191,12 @@ pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { +pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { unsafe { 
transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44923,17 +46212,12 @@ pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { - unsafe { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44949,13 +46233,12 @@ pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { +pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44971,17 +46254,12 @@ pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { - unsafe { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -44997,13 +46275,12 @@ pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { +pub fn 
vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45019,21 +46296,12 @@ pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { - unsafe { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45049,13 +46317,12 @@ pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { +pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45071,17 +46338,12 @@ pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { - unsafe { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45097,13 +46359,12 @@ pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { +pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45119,17 +46380,12 @@ pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { - unsafe { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45145,13 +46401,12 @@ pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { +pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45167,21 +46422,12 @@ pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { - unsafe { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45197,13 +46443,12 @@ pub fn 
vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { +pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45219,17 +46464,12 @@ pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { - unsafe { - let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45245,13 +46485,12 @@ pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { +pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45267,17 +46506,12 @@ pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { - unsafe { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] @@ -45293,13 +46527,12 @@ pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { +pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45315,17 +46548,12 @@ pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { - unsafe { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45341,13 +46569,12 @@ pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { +pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45363,17 +46590,12 @@ pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { - unsafe { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45389,13 +46611,12 @@ pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { +pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45411,16 +46632,12 @@ pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { - unsafe { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45436,13 +46653,12 @@ pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { +pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45458,17 +46674,12 @@ pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { - unsafe { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(nop))] @@ -45484,13 +46695,12 @@ pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { +pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45506,17 +46716,12 @@ pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { - unsafe { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45532,13 +46737,12 @@ pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { +pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45554,16 +46758,12 @@ pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { - unsafe { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop))] @@ -45579,13 +46779,12 @@ pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { +pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45601,17 +46800,12 @@ pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { - unsafe { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45627,13 +46821,12 @@ pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { +pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45649,16 +46842,12 @@ pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { - unsafe { - let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] @@ -45674,13 +46863,12 @@ pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { +pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45696,17 +46884,12 @@ pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { - unsafe { - let a: uint8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45722,13 +46905,12 @@ pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { +pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45744,18 +46926,12 @@ pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { - unsafe { - let a: uint8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45771,13 +46947,12 @@ pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { +pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45793,18 +46968,12 @@ pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { - unsafe { - let a: uint8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45820,13 +46989,12 @@ pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { +pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45842,18 +47010,12 @@ pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { - unsafe { - let a: uint8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45869,13 +47031,12 @@ pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { +pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45891,18 +47052,12 @@ pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { - unsafe { - let a: uint8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45918,13 +47073,12 @@ pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { +pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45940,18 +47094,12 @@ pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { - unsafe { - let a: uint8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast 
operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45967,13 +47115,12 @@ pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { +pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -45989,18 +47136,12 @@ pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { - unsafe { - let a: uint8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46016,13 +47157,12 @@ pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { +pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46038,18 +47178,12 @@ pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { - unsafe { - let a: uint8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, 
ret_val, [1, 0]) - } +pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46065,13 +47199,12 @@ pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { +pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46087,18 +47220,12 @@ pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { - unsafe { - let a: uint8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46114,13 +47241,12 @@ pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { +pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46136,18 +47262,12 @@ pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { - unsafe { - let a: 
uint8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46163,13 +47283,12 @@ pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { +pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46185,17 +47304,12 @@ pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { - unsafe { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46211,13 +47325,12 @@ pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { +pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46233,17 +47346,12 @@ pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] 
-pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { - unsafe { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46259,13 +47367,12 @@ pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { +pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46281,17 +47388,12 @@ pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { - unsafe { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46307,13 +47409,12 @@ pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { +pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46329,16 +47430,12 @@ pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { - unsafe { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46354,13 +47451,12 @@ pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { +pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46376,17 +47472,12 @@ pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { - unsafe { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46402,13 +47493,12 @@ pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { +pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46424,17 +47514,12 @@ pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { - unsafe { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46450,13 +47535,12 @@ pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { +pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46472,16 +47556,12 @@ pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { - unsafe { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46497,13 +47577,12 @@ pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { +pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46519,17 +47598,12 @@ pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { - unsafe { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46545,13 +47619,12 @@ pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { +pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46567,16 +47640,12 @@ pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { - unsafe { - let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46592,13 +47661,12 @@ pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { +pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46614,16 +47682,12 @@ pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { - unsafe { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46639,13 +47703,12 @@ pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { +pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46661,17 +47724,12 @@ pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { - unsafe { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46687,13 +47745,12 @@ pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { +pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46709,21 +47766,12 @@ pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { - unsafe { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46739,13 +47787,12 @@ pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { +pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46761,17 +47808,12 @@ pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { - unsafe { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46787,13 +47829,12 @@ pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { +pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
@@ -46809,17 +47850,12 @@ pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { - unsafe { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46835,13 +47871,12 @@ pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { +pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46857,21 +47892,12 @@ pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { - unsafe { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46887,13 +47913,12 @@ pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { +pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46909,17 +47934,12 @@ pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { - unsafe { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46935,13 +47955,12 @@ pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { +pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46957,17 +47976,12 @@ pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { - unsafe { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46983,13 +47997,12 @@ pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { +pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47005,21 +48018,12 @@ pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { - unsafe { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47035,13 +48039,12 @@ pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { +pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47057,17 +48060,12 @@ pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { - unsafe { - let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47083,13 +48081,12 @@ pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { +pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47105,17 +48102,12 @@ pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { - unsafe { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47131,13 +48123,12 @@ pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { +pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47153,17 +48144,12 @@ pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { - unsafe { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47179,13 +48165,12 @@ pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { +pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret 
cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47201,16 +48186,12 @@ pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { - unsafe { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) - } +pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47226,13 +48207,12 @@ pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { +pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47248,17 +48228,12 @@ pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { - unsafe { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47274,13 +48249,12 @@ pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { +pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47296,17 +48270,12 @@ pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { - unsafe { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47322,13 +48291,12 @@ pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { +pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47344,16 +48312,12 @@ pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { - unsafe { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) - } +pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47369,13 +48333,12 @@ pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { +pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47391,17 +48354,12 @@ pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { - unsafe { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47417,13 +48375,12 @@ pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { +pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47439,17 +48396,12 @@ pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { - unsafe { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47465,13 +48417,12 @@ pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { +pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { unsafe { transmute(a) } } #[doc = 
"Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47487,16 +48438,12 @@ pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { - unsafe { - let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) - } +pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47512,13 +48459,12 @@ pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { +pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47534,16 +48480,12 @@ pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { - unsafe { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) - } +pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47559,13 +48501,12 @@ pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { +pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47581,21 +48522,12 @@ pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { - unsafe { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47611,13 +48543,12 @@ pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { +pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47633,17 +48564,12 @@ pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { - unsafe { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47659,13 +48585,12 @@ pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { +pub fn vreinterpretq_s8_p64(a: 
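// [editor's annotation, not part of the generated patch] Every
// vreinterpret* body in the regenerated code is a pure bit-cast: source
// and destination vector types have the same size, so `transmute`
// re-labels the 64 or 128 bits without emitting an instruction (which is
// why the test attributes assert `nop`). A minimal sketch of the pattern,
// assuming ordinary core::mem::transmute semantics:
//     pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t {
//         unsafe { core::mem::transmute(a) } // same 64 bits, new lane type
//     }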
poly64x2_t) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47681,17 +48606,12 @@ pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { - unsafe { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47707,13 +48627,12 @@ pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { +pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47729,21 +48648,12 @@ pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { - unsafe { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47759,13 +48669,12 @@ pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { +pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47781,17 +48690,12 @@ pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { - unsafe { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47807,13 +48711,12 @@ pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { +pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -47829,19 +48732,14 @@ pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { - unsafe { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47855,15 +48753,14 @@ pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { +pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47877,23 +48774,14 @@ pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { - unsafe { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47907,15 +48795,14 @@ pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { +pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47929,19 +48816,14 @@ pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> 
poly16x8_t { - unsafe { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47955,15 +48837,14 @@ pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { +pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -47977,19 +48858,14 @@ pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { - unsafe { - let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48003,15 +48879,14 @@ pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { +pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48025,18 +48900,14 @@ pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { - unsafe { - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48050,15 +48921,14 @@ pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { +pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48072,18 +48942,14 @@ pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { + unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch 
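// [editor's annotation] `p128` is a plain 128-bit scalar (stdarch defines
// it as an alias of u128), not a SIMD type, so casting it to or from a
// 128-bit vector is still a single transmute. A hypothetical round-trip,
// assuming the opposite direction exists elsewhere in the regenerated file:
//     let v: uint64x2_t = vreinterpretq_u64_p128(x);
//     let y: p128 = vreinterpretq_p128_u64(v); // y == x: no lanes move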
= "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48097,15 +48963,14 @@ pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { +pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { unsafe { transmute(a) } } #[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -48119,22 +48984,19 @@ pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { + unsafe { transmute(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -48144,19 +49006,19 @@ pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { - unsafe { transmute(a) } +pub fn vrev16_p8(a: poly8x8_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -48166,22 +49028,23 @@ pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { +pub fn vrev16_p8(a: poly8x8_t) -> poly8x8_t { unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -48191,19 +49054,19 @@ pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { - unsafe { transmute(a) } +pub fn vrev16_s8(a: int8x8_t) -> int8x8_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -48213,22 +49076,23 @@ pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { +pub fn vrev16_s8(a: int8x8_t) -> int8x8_t { unsafe { - let ret_val: uint8x8_t = transmute(a); + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
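// [editor's annotation] The vrev16 family reverses the bytes inside each
// 16-bit halfword, which is exactly the shuffle pattern used in these
// bodies: [1, 0, 3, 2, 5, 4, 7, 6] swaps byte pairs (0,1), (2,3), and so
// on. An illustrative trace (plain arrays, not the real API):
//     a            = [10, 11, 20, 21, 30, 31, 40, 41]
//     vrev16_u8(a) = [11, 10, 21, 20, 31, 30, 41, 40]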
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -48238,19 +49102,19 @@ pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { - unsafe { transmute(a) } +pub fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -48260,22 +49124,23 @@ pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { +pub fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -48285,19 +49150,19 @@ pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { - unsafe { transmute(a) } +pub fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -48307,22 +49172,29 @@ pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { +pub fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = + simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -48332,19 +49204,19 @@ pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { - unsafe { transmute(a) } +pub fn vrev16q_s8(a: int8x16_t) -> int8x16_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -48354,22 +49226,29 @@ pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { +pub fn vrev16q_s8(a: int8x16_t) -> int8x16_t { unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 
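// [editor's annotation] The big-endian variants below wrap the same core
// shuffle in a full lane reversal on the way in and out. Conjugating a
// swap-within-groups permutation by a full reverse gives back the same
// net permutation, so this should still fold to a single rev16; the extra
// shuffles only keep the generator's big-endian lane numbering consistent.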
3, 2, 1, 0]) + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = + simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -48379,19 +49258,19 @@ pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { - unsafe { transmute(a) } +pub fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev16) )] #[cfg_attr( not(target_arch = "arm"), @@ -48401,22 +49280,29 @@ pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { +pub fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = + simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48426,19 +49312,19 @@ pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { - unsafe { transmute(a) } +pub fn vrev32_p16(a: poly16x4_t) -> poly16x4_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48448,22 +49334,23 @@ pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { +pub fn vrev32_p16(a: poly16x4_t) -> poly16x4_t { unsafe { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - transmute(a) + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly16x4_t = simd_shuffle!(a, a, [1, 0, 3, 2]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48473,19 +49360,19 @@ pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { - unsafe { transmute(a) } +pub fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48495,23 +49382,23 @@ pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { +pub fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { unsafe { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48521,19 +49408,19 @@ pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { - unsafe { transmute(a) } +pub fn vrev32_s16(a: int16x4_t) -> int16x4_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48543,27 +49430,23 @@ pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { +pub fn vrev32_s16(a: int16x4_t) -> int16x4_t { unsafe { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int16x4_t = simd_shuffle!(a, a, [1, 0, 3, 2]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48573,19 +49456,19 @@ pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { - unsafe { transmute(a) } +pub fn vrev32_s8(a: int8x8_t) -> int8x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48595,23 +49478,23 @@ pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { +pub fn vrev32_s8(a: int8x8_t) -> int8x8_t { unsafe { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x8_t = transmute(a); + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48621,19 +49504,19 @@ pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { - unsafe { transmute(a) } +pub fn vrev32_u16(a: uint16x4_t) -> uint16x4_t { + unsafe { 
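// [editor's annotation] On 16-bit lanes, vrev32 swaps the two halfwords
// of each 32-bit word, hence indices [1, 0, 3, 2] on a 4-lane vector:
//     [a0, a1, a2, a3] => [a1, a0, a3, a2]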
simd_shuffle!(a, a, [1, 0, 3, 2]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48643,23 +49526,23 @@ pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { +pub fn vrev32_u16(a: uint16x4_t) -> uint16x4_t { unsafe { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int32x4_t = transmute(a); + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x4_t = simd_shuffle!(a, a, [1, 0, 3, 2]); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48669,19 +49552,19 @@ pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { - unsafe { transmute(a) } +pub fn vrev32_u8(a: uint8x8_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48691,27 +49574,23 @@ pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { +pub fn vrev32_u8(a: 
uint8x8_t) -> uint8x8_t { unsafe { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48721,19 +49600,19 @@ pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { - unsafe { transmute(a) } +pub fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48743,23 +49622,23 @@ pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { +pub fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { unsafe { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x8_t = transmute(a); + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48769,19 +49648,19 @@ pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { - unsafe { transmute(a) } +pub fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48791,23 +49670,29 @@ pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { +pub fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t { unsafe { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = + simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48817,19 +49702,19 @@ pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { - unsafe { transmute(a) } +pub fn vrev32q_s16(a: int16x8_t) -> int16x8_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48839,27 +49724,23 @@ pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { +pub fn vrev32q_s16(a: int16x8_t) -> int16x8_t { unsafe { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48869,19 +49750,19 @@ pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { - unsafe { transmute(a) } +pub fn vrev32q_s8(a: int8x16_t) -> int8x16_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48891,23 +49772,29 @@ pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { +pub fn vrev32q_s8(a: int8x16_t) -> int8x16_t { unsafe { - let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: 
poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x16_t = + simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48917,19 +49804,19 @@ pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { - unsafe { transmute(a) } +pub fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48939,23 +49826,23 @@ pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { +pub fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { unsafe { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48965,19 +49852,19 @@ pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { - unsafe { transmute(a) } +pub fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev32) )] #[cfg_attr( not(target_arch = "arm"), @@ -48987,23 +49874,29 @@ pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { +pub fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { unsafe { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = + simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_f32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49013,19 +49906,19 @@ pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { - unsafe { transmute(a) } +pub fn vrev64_f32(a: float32x2_t) -> float32x2_t { + unsafe { simd_shuffle!(a, a, [1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_f32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49035,23 +49928,23 @@ pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { +pub fn vrev64_f32(a: float32x2_t) -> float32x2_t { unsafe { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x2_t = transmute(a); + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: float32x2_t = simd_shuffle!(a, a, [1, 0]); simd_shuffle!(ret_val, ret_val, [1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49061,19 +49954,19 @@ pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { - unsafe { transmute(a) } +pub fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49083,22 +49976,23 @@ pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { +pub fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { unsafe { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = 
"Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49108,19 +50002,19 @@ pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { - unsafe { transmute(a) } +pub fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { + unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49130,23 +50024,23 @@ pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { +pub fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { unsafe { let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let ret_val: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49156,19 +50050,19 @@ pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { - unsafe { transmute(a) } +pub fn vrev64_s16(a: 
int16x4_t) -> int16x4_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49178,23 +50072,23 @@ pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { +pub fn vrev64_s16(a: int16x4_t) -> int16x4_t { unsafe { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49204,19 +50098,19 @@ pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { - unsafe { transmute(a) } +pub fn vrev64_s32(a: int32x2_t) -> int32x2_t { + unsafe { simd_shuffle!(a, a, [1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49226,22 +50120,23 @@ pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn 
vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { +pub fn vrev64_s32(a: int32x2_t) -> int32x2_t { unsafe { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: int32x2_t = simd_shuffle!(a, a, [1, 0]); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49251,19 +50146,19 @@ pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { - unsafe { transmute(a) } +pub fn vrev64_s8(a: int8x8_t) -> int8x8_t { + unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49273,23 +50168,23 @@ pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { +pub fn vrev64_s8(a: int8x8_t) -> int8x8_t { unsafe { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49299,19 +50194,19 @@ pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { - unsafe { transmute(a) } +pub fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49321,22 +50216,23 @@ pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { +pub fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { unsafe { - let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49346,19 +50242,19 @@ pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { - unsafe { transmute(a) } +pub fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { + unsafe { simd_shuffle!(a, a, [1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49368,23 +50264,23 @@ pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { +pub fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { unsafe { - let a: poly8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let ret_val: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49394,19 +50290,19 @@ pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { - unsafe { transmute(a) } +pub fn vrev64_u8(a: uint8x8_t) -> uint8x8_t { + unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49416,24 +50312,23 @@ pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { +pub fn vrev64_u8(a: uint8x8_t) -> uint8x8_t { unsafe { - let a: poly8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] +#[doc = "Reversing vector 
elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_f32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49443,19 +50338,19 @@ pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { - unsafe { transmute(a) } +pub fn vrev64q_f32(a: float32x4_t) -> float32x4_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_f32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49465,24 +50360,23 @@ pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { +pub fn vrev64q_f32(a: float32x4_t) -> float32x4_t { unsafe { - let a: poly8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float32x4_t = simd_shuffle!(a, a, [1, 0, 3, 2]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_p16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49492,19 +50386,19 @@ pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { - unsafe { transmute(a) } +pub fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } 
-#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_p16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49514,24 +50408,23 @@ pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { +pub fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { unsafe { - let a: poly8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_p8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49541,19 +50434,19 @@ pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { - unsafe { transmute(a) } +pub fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { + unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_p8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49563,24 +50456,29 @@ pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { +pub fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { unsafe { let a: poly8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: poly8x16_t = + simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49590,19 +50488,19 @@ pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { - unsafe { transmute(a) } +pub fn vrev64q_s16(a: int16x8_t) -> int16x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49612,24 +50510,23 @@ pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { +pub fn vrev64q_s16(a: int16x8_t) -> int16x8_t { unsafe { - let a: poly8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint16x8_t = transmute(a); + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49639,19 +50536,19 @@ pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { - unsafe { transmute(a) } +pub fn vrev64q_s32(a: int32x4_t) -> int32x4_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49661,24 +50558,23 @@ pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { +pub fn vrev64q_s32(a: int32x4_t) -> int32x4_t { unsafe { - let a: poly8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: int32x4_t = simd_shuffle!(a, a, [1, 0, 3, 2]); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49688,19 +50584,19 @@ pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { - unsafe { transmute(a) } +pub fn vrev64q_s8(a: int8x16_t) -> int8x16_t { + unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49710,24 +50606,29 @@ pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { +pub fn vrev64q_s8(a: int8x16_t) -> int8x16_t { unsafe { - let a: poly8x16_t = + let a: int8x16_t = simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let ret_val: int8x16_t = + simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u16)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49737,19 +50638,19 @@ pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { - unsafe { transmute(a) } +pub fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u16)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49759,24 +50660,23 @@ pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { +pub fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { unsafe { - let a: poly8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, 1, 0]); - let ret_val: poly16x8_t = transmute(a); + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u32)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49786,19 +50686,19 @@ pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { - unsafe { transmute(a) } +pub fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { + unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u32)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49808,24 +50708,23 @@ pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { +pub fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { unsafe { - let a: poly8x16_t = - simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: uint32x4_t = simd_shuffle!(a, a, [1, 0, 3, 2]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u8)"] #[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) 
+ assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49835,19 +50734,19 @@ pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { - unsafe { transmute(a) } +pub fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { + unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] +#[doc = "Reversing vector elements (swap endianness)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u8)"] #[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] #[cfg_attr( not(target_arch = "arm"), @@ -49857,119 +50756,128 @@ pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { +pub fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { unsafe { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: float32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = + simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] +#[doc = "Reverse elements in 64-bit doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_f16)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrev64))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { - unsafe { transmute(a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrev64_f16(a: float16x4_t) -> float16x4_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] +#[doc = "Reverse elements in 64-bit doublewords"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_f16)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrev64))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrev64_f16(a: float16x4_t) -> float16x4_t { unsafe { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let ret_val: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] +#[doc = "Reverse elements in 64-bit doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_f16)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrev64))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { - unsafe { transmute(a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrev64q_f16(a: float16x8_t) -> float16x8_t { + unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] +#[doc = "Reverse elements in 64-bit doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_f16)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrev64))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rev64) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrev64q_f16(a: float16x8_t) -> float16x8_t { unsafe { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -49979,19 +50887,26 @@ pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { - unsafe { transmute(a) } +pub fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i8")] + fn _vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + unsafe { _vrhadd_s8(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -50001,22 +50916,26 @@ pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { - unsafe { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v16i8")] + fn _vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } + unsafe { _vrhaddq_s8(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -50026,19 +50945,26 @@ pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { - unsafe { transmute(a) } +pub fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i16")] + fn _vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + unsafe { _vrhadd_s16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -50048,23 +50974,26 @@ pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { - unsafe { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i16")] + fn _vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } + unsafe { _vrhaddq_s16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch 
= "arm64ec")), - assert_instr(nop) + assert_instr(srhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -50074,19 +51003,26 @@ pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { - unsafe { transmute(a) } +pub fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v2i32")] + fn _vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { _vrhadd_s32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -50096,23 +51032,26 @@ pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { - unsafe { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srhadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i32")] + fn _vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } + unsafe { _vrhaddq_s32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -50122,19 +51061,26 @@ pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { - unsafe { transmute(a) } +pub fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v8i8" + )] + #[cfg_attr(target_arch = "arm", 
link_name = "llvm.arm.neon.vrhaddu.v8i8")] + fn _vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; + } + unsafe { _vrhadd_u8(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -50144,22 +51090,26 @@ pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { - unsafe { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v16i8")] + fn _vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; } + unsafe { _vrhaddq_u8(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -50169,19 +51119,26 @@ pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { - unsafe { transmute(a) } +pub fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v4i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i16")] + fn _vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; + } + unsafe { _vrhadd_u16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -50191,23 +51148,26 @@ pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { - unsafe { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +pub fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v8i16" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i16")] + fn _vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; } + unsafe { _vrhaddq_u16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -50217,19 +51177,26 @@ pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { - unsafe { transmute(a) } +pub fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v2i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v2i32")] + fn _vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; + } + unsafe { _vrhadd_u32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urhadd) )] #[cfg_attr( not(target_arch = "arm"), @@ -50239,69 +51206,84 @@ pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { - unsafe { - let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); - transmute(a) +pub fn vrhaddq_u32(a: 
uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urhadd.v4i32" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i32")] + fn _vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; } + unsafe { _vrhaddq_u32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frintn) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { - unsafe { transmute(a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrndn_f16(a: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec", target_arch = "arm"), + link_name = "llvm.roundeven.v4f16" + )] + fn _vrndn_f16(a: float16x4_t) -> float16x4_t; + } + unsafe { _vrndn_f16(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frintn) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { - unsafe { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - transmute(a) +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrndnq_f16(a: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec", target_arch = "arm"), + link_name = "llvm.roundeven.v8f16" + )] + fn _vrndnq_f16(a: float16x8_t) -> float16x8_t; } + unsafe { _vrndnq_f16(a) } } -#[doc = 
"Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frintn) )] #[cfg_attr( not(target_arch = "arm"), @@ -50311,19 +51293,25 @@ pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { - unsafe { transmute(a) } +pub fn vrndn_f32(a: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec", target_arch = "arm"), + link_name = "llvm.roundeven.v2f32" + )] + fn _vrndn_f32(a: float32x2_t) -> float32x2_t; + } + unsafe { _vrndn_f32(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] +#[doc = "Floating-point round to integral, to nearest with ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frintn) )] #[cfg_attr( not(target_arch = "arm"), @@ -50333,23 +51321,25 @@ pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { - unsafe { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: float32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub fn vrndnq_f32(a: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec", target_arch = "arm"), + link_name = "llvm.roundeven.v4f32" + )] + fn _vrndnq_f32(a: float32x4_t) -> float32x4_t; } + unsafe { _vrndnq_f32(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50359,19 +51349,26 @@ pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { - unsafe { transmute(a) } +pub fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v8i8" + )] + fn _vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + } + unsafe { _vrshl_s8(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50381,27 +51378,26 @@ pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { - unsafe { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v16i8" + )] + fn _vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } + unsafe { _vrshlq_s8(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50411,19 +51407,26 @@ pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { - unsafe { transmute(a) } +pub fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe 
extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v4i16" + )] + fn _vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + } + unsafe { _vrshl_s16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50433,23 +51436,26 @@ pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { - unsafe { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v8i16" + )] + fn _vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } + unsafe { _vrshlq_s16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50459,19 +51465,26 @@ pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { - unsafe { transmute(a) } +pub fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v2i32" + )] + fn _vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + } + unsafe { _vrshl_s32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50481,23 +51494,26 @@ pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { - unsafe { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v4i32" + )] + fn _vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } + unsafe { _vrshlq_s32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50507,19 +51523,26 @@ pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { - unsafe { transmute(a) } +pub fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v1i64" + )] + fn _vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + } + unsafe { _vrshl_s64(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] +#[doc = "Signed rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50529,27 +51552,26 @@ pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> 
uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { - unsafe { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.srshl.v2i64" + )] + fn _vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } + unsafe { _vrshlq_s64(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50559,19 +51581,26 @@ pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { - unsafe { transmute(a) } +pub fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v8i8" + )] + fn _vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; + } + unsafe { _vrshl_u8(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50581,23 +51610,26 @@ pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { - unsafe { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")] + #[cfg_attr( + any(target_arch = 
"aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v16i8" + )] + fn _vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; } + unsafe { _vrshlq_u8(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50607,19 +51639,26 @@ pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { - unsafe { transmute(a) } +pub fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v4i16" + )] + fn _vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; + } + unsafe { _vrshl_u16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50629,23 +51668,26 @@ pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { - unsafe { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: uint64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v8i16" + )] + fn _vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; } + unsafe { _vrshlq_u16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50655,19 +51697,26 @@ pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { - unsafe { transmute(a) } +pub fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v2i32" + )] + fn _vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; + } + unsafe { _vrshl_u32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50677,27 +51726,26 @@ pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { - unsafe { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v4i32" + )] + fn _vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; } + unsafe { _vrshlq_u32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50707,19 +51755,26 @@ pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] 
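// --- Illustrative aside (editorial sketch; not part of the generated patch) ---
// `vrshl_*`/`vrshlq_*` shift each lane of `a` by the signed amount in the
// matching lane of `b`; a negative amount is a rounding shift right, which is
// why the `vrshr_n_*` intrinsics further down reduce to `vrshl*(a, vdup*(-N))`.
// A hypothetical one-lane model of the unsigned rounding right shift:
fn rshr_u8_model(a: u8, n: u32) -> u8 {
    debug_assert!((1..=8).contains(&n));
    // Add half of the discarded range before shifting, so halves round up.
    (((a as u16) + (1 << (n - 1))) >> n) as u8
}
// e.g. rshr_u8_model(7, 2) == 2 (7/4 rounded to nearest), not the truncated 1.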
-pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { - unsafe { transmute(a) } +pub fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v1i64" + )] + fn _vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; + } + unsafe { _vrshl_u64(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] +#[doc = "Unsigned rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -50729,24 +51784,28 @@ pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { - unsafe { - let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: poly64x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) +pub fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.urshl.v2i64" + )] + fn _vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; } + unsafe { _vrshlq_u64(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -50755,20 +51814,21 @@ pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { - unsafe { transmute(a) } +pub fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + vrshl_s8(a, vdup_n_s8(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -50777,23 +51837,21 @@ pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { - unsafe { - let ret_val: int8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + vrshlq_s8(a, vdupq_n_s8(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -50802,20 +51860,21 @@ pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { - unsafe { transmute(a) } +pub fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + vrshl_s16(a, vdup_n_s16(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -50824,23 +51883,21 @@ pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { - unsafe { - let ret_val: int16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn 
vrshrq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + vrshlq_s16(a, vdupq_n_s16(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -50849,20 +51906,21 @@ pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { - unsafe { transmute(a) } +pub fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + vrshl_s32(a, vdup_n_s32(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -50871,23 +51929,21 @@ pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { - unsafe { - let ret_val: int32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + vrshlq_s32(a, vdupq_n_s32(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ 
-50896,20 +51952,21 @@ pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { - unsafe { transmute(a) } +pub fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + vrshl_s64(a, vdup_n_s64(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] +#[doc = "Signed rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -50918,23 +51975,21 @@ pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { - unsafe { - let ret_val: uint8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + vrshlq_s64(a, vdupq_n_s64(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -50943,20 +51998,21 @@ pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { - unsafe { transmute(a) } +pub fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + vrshl_u8(a, vdup_n_s8(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, 
N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -50965,23 +52021,21 @@ pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { - unsafe { - let ret_val: uint16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } +pub fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + vrshlq_u8(a, vdupq_n_s8(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -50990,20 +52044,21 @@ pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { - unsafe { transmute(a) } +pub fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + vrshl_u16(a, vdup_n_s16(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51012,23 +52067,21 @@ pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { - unsafe { - let ret_val: uint32x2_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } +pub fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + vrshlq_u16(a, vdupq_n_s16(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51037,20 +52090,21 @@ pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { - unsafe { transmute(a) } +pub fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + vrshl_u32(a, vdup_n_s32(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51059,23 +52113,21 @@ pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { - unsafe { - let ret_val: poly8x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + vrshlq_u32(a, vdupq_n_s32(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51084,20 +52136,21 @@ pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { - unsafe { transmute(a) } +pub fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { + 
static_assert!(N >= 1 && N <= 64); + vrshl_u64(a, vdup_n_s64(-N as _)) } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] +#[doc = "Unsigned rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(urshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51106,23 +52159,126 @@ pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { - unsafe { - let ret_val: poly16x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + vrshlq_u64(a, vdupq_n_s64(-N as _)) +} +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] + fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; } + unsafe { _vrshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] + fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; + } + unsafe { _vrshrn_n_s32(a, const { int32x4_t([-N; 4]) }) } +} +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vrshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", 
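The vrshr_n_*/vrshrq_n_* additions above all lower a rounding right shift to vrshl with a negated, broadcast shift count. A minimal usage sketch, assuming the safe, const-generic signatures these hunks introduce (the wrapper name and shift amount are illustrative):

#[cfg(target_arch = "aarch64")]
use core::arch::aarch64::{int64x1_t, vrshr_n_s64};

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
fn rounded_shift(x: int64x1_t) -> int64x1_t {
    // vrshr_n adds 1 << (N - 1) before shifting: a lane holding 7
    // becomes (7 + 2) >> 2 == 2, where a truncating shift gives 1.
    vrshr_n_s64::<2>(x)
}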
link_name = "llvm.arm.neon.vrshiftn.v2i32")] + fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; + } + unsafe { _vrshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } +} +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rshrn.v8i8" + )] + fn _vrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; + } + unsafe { _vrshrn_n_s16(a, N) } +} +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rshrn.v4i16" + )] + fn _vrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; + } + unsafe { _vrshrn_n_s32(a, N) } +} +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(rshrn, N = 2))] +#[rustc_legacy_const_generics(1)] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rshrn.v2i32" + )] + fn _vrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; + } + unsafe { _vrshrn_n_s64(a, N) } +} +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rshrn, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51131,20 +52287,21 @@ pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { - unsafe { transmute(a) } +pub fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe { transmute(vrshrn_n_s16::(transmute(a))) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rshrn, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51153,28 +52310,21 @@ pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { - unsafe { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe { transmute(vrshrn_n_s32::(transmute(a))) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] +#[doc = "Rounding shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(rshrn, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51183,67 +52333,79 @@ pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { - unsafe { transmute(a) } +pub fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe { transmute(vrshrn_n_s64::(transmute(a))) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,fp16")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frsqrte) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = 
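The unsigned vrshrn_n_* wrappers above transmute through the signed variants, so only the signed LLVM intrinsics are declared. A sketch of the usual narrowing use, assuming this patch's signatures (names and scale are illustrative):

#[cfg(target_arch = "aarch64")]
use core::arch::aarch64::{uint16x8_t, uint8x8_t, vrshrn_n_u16};

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
fn narrow_fixed_point(acc: uint16x8_t) -> uint8x8_t {
    // Per lane: (acc + 128) >> 8, keeping the low 8 bits. The shift
    // rounds, but the narrowing truncates rather than saturates.
    vrshrn_n_u16::<8>(acc)
}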
"neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { - unsafe { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrsqrte_f16(a: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v4f16" + )] + fn _vrsqrte_f16(a: float16x4_t) -> float16x4_t; } + unsafe { _vrsqrte_f16(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,fp16")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frsqrte) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { - unsafe { transmute(a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrsqrteq_f16(a: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v8f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v8f16" + )] + fn _vrsqrteq_f16(a: float16x8_t) -> float16x8_t; + } + unsafe { _vrsqrteq_f16(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frsqrte) )] #[cfg_attr( not(target_arch = "arm"), @@ -51253,23 +52415,26 @@ pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { - unsafe { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: int32x4_t = transmute(a); - 
simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v2f32" + )] + fn _vrsqrte_f32(a: float32x2_t) -> float32x2_t; } + unsafe { _vrsqrte_f32(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] +#[doc = "Reciprocal square-root estimate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frsqrte) )] #[cfg_attr( not(target_arch = "arm"), @@ -51279,19 +52444,26 @@ pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { - unsafe { transmute(a) } +pub fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrte.v4f32" + )] + fn _vrsqrteq_f32(a: float32x4_t) -> float32x4_t; + } + unsafe { _vrsqrteq_f32(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] +#[doc = "Unsigned reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ursqrte) )] #[cfg_attr( not(target_arch = "arm"), @@ -51301,27 +52473,26 @@ pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { - unsafe { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) +pub fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ursqrte.v2i32" + )] + fn _vrsqrte_u32(a: uint32x2_t) -> uint32x2_t; } + unsafe { _vrsqrte_u32(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] +#[doc = "Unsigned reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ursqrte) )] #[cfg_attr( not(target_arch = "arm"), @@ -51331,67 +52502,86 @@ pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { - unsafe { transmute(a) } +pub fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ursqrte.v4i32" + )] + fn _vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t; + } + unsafe { _vrsqrteq_u32(a) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f16)"] #[inline] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,fp16")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frsqrts) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { - unsafe { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrsqrts_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v4f16" + )] + fn _vrsqrts_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; } + unsafe { _vrsqrts_f16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f16)"] #[inline] -#[cfg(target_endian = "little")] -#[target_feature(enable = "neon")] 
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,fp16")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frsqrts) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { - unsafe { transmute(a) } +#[cfg(not(target_arch = "arm64ec"))] +pub fn vrsqrtsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v8f16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v8f16" + )] + fn _vrsqrtsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + } + unsafe { _vrsqrtsq_f16(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frsqrts) )] #[cfg_attr( not(target_arch = "arm"), @@ -51401,23 +52591,26 @@ pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { - unsafe { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: uint32x4_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) +pub fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v2f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v2f32" + )] + fn _vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; } + unsafe { _vrsqrts_f32(a, b) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] +#[doc = "Floating-point reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(frsqrts) )] #[cfg_attr( 
not(target_arch = "arm"), @@ -51427,20 +52620,28 @@ pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { - unsafe { transmute(a) } -} -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] +pub fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.frsqrts.v4f32" + )] + fn _vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + } + unsafe { _vrsqrtsq_f32(a, b) } +} +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51449,28 +52650,21 @@ pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { - unsafe { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly8x16_t = transmute(a); - simd_shuffle!( - ret_val, - ret_val, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - } +pub fn vrsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe { simd_add(a, vrshr_n_s8::<N>(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)"] #[inline] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51479,20 +52673,21 @@ pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { - unsafe { transmute(a) } +pub fn vrsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + unsafe { simd_add(a, vrshrq_n_s8::<N>(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's
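vrsqrte_* and vrsqrts_* are meant to be composed: the estimate is good to roughly 8 bits, and vrsqrts(a, b) returns the Newton-Raphson correction factor (3 - a*b)/2. A sketch of the standard refinement, assuming this patch's safe signatures (the function name is illustrative):

#[cfg(target_arch = "aarch64")]
use core::arch::aarch64::{float32x2_t, vmul_f32, vrsqrte_f32, vrsqrts_f32};

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
fn rsqrt_refined(x: float32x2_t) -> float32x2_t {
    let mut e = vrsqrte_f32(x); // rough estimate of 1/sqrt(x)
    // e * (3 - x*e*e)/2 is one Newton-Raphson step; each step roughly
    // doubles the number of correct bits.
    e = vmul_f32(e, vrsqrts_f32(vmul_f32(x, e), e));
    e = vmul_f32(e, vrsqrts_f32(vmul_f32(x, e), e));
    e
}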
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)"] #[inline] -#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51501,23 +52696,21 @@ pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { - unsafe { - let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); - let ret_val: poly16x8_t = transmute(a); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } +pub fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe { simd_add(a, vrshr_n_s16::(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51526,19 +52719,21 @@ pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { - unsafe { transmute(a) } +pub fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + unsafe { simd_add(a, vrshrq_n_s16::(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srsra, N = 2) )] 
+#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51547,19 +52742,21 @@ pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { - unsafe { transmute(a) } +pub fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe { simd_add(a, vrshr_n_s32::(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51568,19 +52765,21 @@ pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { - unsafe { transmute(a) } +pub fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe { simd_add(a, vrshrq_n_s32::(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s64)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51589,19 +52788,21 @@ pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { - unsafe { transmute(a) } +pub fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + unsafe { simd_add(a, vrshr_n_s64::(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] +#[doc = "Signed rounding shift right and accumulate"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(srsra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51610,19 +52811,21 @@ pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { - unsafe { transmute(a) } +pub fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + unsafe { simd_add(a, vrshrq_n_s64::(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51631,19 +52834,21 @@ pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { - unsafe { transmute(a) } +pub fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe { simd_add(a, vrshr_n_u8::(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51652,19 +52857,21 @@ pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { - unsafe { transmute(a) } +pub fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + unsafe { simd_add(a, vrshrq_n_u8::(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51673,19 +52880,21 @@ pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { - unsafe { transmute(a) } +pub fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe { simd_add(a, vrshr_n_u16::(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51694,19 +52903,21 @@ pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { - unsafe { transmute(a) } +pub fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + unsafe { simd_add(a, vrshrq_n_u16::(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] 
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51715,19 +52926,21 @@ pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { - unsafe { transmute(a) } +pub fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe { simd_add(a, vrshr_n_u32::(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51736,19 +52949,21 @@ pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { - unsafe { transmute(a) } +pub fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe { simd_add(a, vrshrq_n_u32::(b)) } } -#[doc = "Vector reinterpret cast operation"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u64)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51757,19 +52972,21 @@ pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { - unsafe { transmute(a) } +pub fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + unsafe { simd_add(a, vrshr_n_u64::(b)) } } -#[doc = "Reversing vector elements 
(swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_p8)"] +#[doc = "Unsigned rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) + assert_instr(ursra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51778,18 +52995,19 @@ pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev16_p8(a: poly8x8_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } +pub fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + unsafe { simd_add(a, vrshrq_n_u64::(b)) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_s8)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -51799,18 +53017,26 @@ pub fn vrev16_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev16_s8(a: int8x8_t) -> int8x8_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } +pub fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rsubhn.v8i8" + )] + fn _vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; + } + unsafe { _vrsubhn_s16(a, b) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_u8)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -51820,18 +53046,26 @@ pub fn vrev16_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") )] -pub fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } +pub fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rsubhn.v4i16" + )] + fn _vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; + } + unsafe { _vrsubhn_s32(a, b) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_p8)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -51841,18 +53075,26 @@ pub fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } +pub fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.rsubhn.v2i32" + )] + fn _vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; + } + unsafe { _vrsubhn_s64(a, b) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_s8)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -51862,18 +53104,18 @@ pub fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev16q_s8(a: int8x16_t) -> int8x16_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } +pub fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + unsafe { transmute(vrsubhn_s16(transmute(a), transmute(b))) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_u8)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev16) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -51883,18 +53125,18 @@ pub fn vrev16q_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } +pub fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + unsafe { transmute(vrsubhn_s32(transmute(a), transmute(b))) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_p16)"] +#[doc = "Rounding subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) + assert_instr(rsubhn) )] #[cfg_attr( not(target_arch = "arm"), @@ -51904,40 +53146,101 @@ pub fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev32_p16(a: poly16x4_t) -> poly16x4_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } +pub fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + unsafe { transmute(vrsubhn_s64(transmute(a), transmute(b))) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_p8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vset_lane_f16(a: f16, b: float16x4_t) -> float16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_insert!(b, LANE as u32, a) } +} +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + all(test, 
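
The `vrsubhn_*` group that finishes just above follows one pattern: the signed widths bind the LLVM `rsubhn`/`vrsubhn` intrinsics directly, and the unsigned widths reuse the signed ones through `transmute`, which is sound because the operation is bit-for-bit identical for either signedness. As a reading aid, here is a scalar model of a single `vrsubhn_s32` lane; the helper name and the standalone `main` are invented for illustration and are not part of the generated file. Per lane: take the wrapping difference, add the rounding constant `1 << 15`, and keep the high half.

// Illustrative scalar model of one vrsubhn_s32 lane (not generated code).
fn rsubhn_s32_lane(a: i32, b: i32) -> i16 {
    // wrapping 32-bit difference, rounding bias of 1 << 15, then the high half
    ((a.wrapping_sub(b) as u32).wrapping_add(0x8000) >> 16) as i16
}

fn main() {
    // (0x0012_8000 - 0 + 0x8000) >> 16 == 0x13
    assert_eq!(rsubhn_s32_lane(0x0012_8000, 0), 0x13);
}
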
any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vset_lane_f16(a: f16, b: float16x4_t) -> float16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float16x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } +} +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f16)"] +#[inline] +#[cfg(target_endian = "little")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) )] -pub fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vsetq_lane_f16(a: f16, b: float16x8_t) -> float16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_s16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f16)"] +#[inline] +#[cfg(target_endian = "big")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vsetq_lane_f16(a: f16, b: float16x8_t) -> float16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: float16x8_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } +} +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51946,19 +53249,22 @@ pub fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") )] -pub fn vrev32_s16(a: int16x4_t) -> int16x4_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } +pub fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_s8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51967,19 +53273,26 @@ pub fn vrev32_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev32_s8(a: int8x8_t) -> int8x8_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } +pub fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: float32x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_u16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -51988,19 +53301,22 @@ pub fn vrev32_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev32_u16(a: uint16x4_t) -> uint16x4_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } +pub fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_u8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vrev32.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52009,19 +53325,26 @@ pub fn vrev32_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev32_u8(a: uint8x8_t) -> uint8x8_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } +pub fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: float32x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_p16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52030,19 +53353,22 @@ pub fn vrev32_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } +pub fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_p8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52051,19 +53377,26 @@ pub fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } +pub fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + let b: int8x8_t = 
simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: int8x8_t = simd_insert!(b, LANE as u32, a);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
-#[doc = "Reversing vector elements (swap endianness)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_s16)"]
+#[doc = "Insert vector element from another vector element"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(rev32)
+    assert_instr(nop, LANE = 0)
 )]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -52072,19 +53405,22 @@ pub fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vrev32q_s16(a: int16x8_t) -> int16x8_t {
-    unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) }
+pub fn vsetq_lane_s8<const LANE: i32>(a: i8, b: int8x16_t) -> int8x16_t {
+    static_assert_uimm_bits!(LANE, 4);
+    unsafe { simd_insert!(b, LANE as u32, a) }
 }
-#[doc = "Reversing vector elements (swap endianness)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_s8)"]
+#[doc = "Insert vector element from another vector element"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
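
A note on the `#[cfg(target_endian = "big")]` bodies in this `vset_lane` family: each one reverses the vector with `simd_shuffle!`, performs the `simd_insert!`, and reverses back. Behaviorally that is a single insert at the mirrored in-register position; expressing it as reverse/insert/reverse appears to be how this generator reconciles architectural lane numbering with the reversed element order on big-endian targets, and the paired shuffles are expected to fold away in LLVM. A scalar model of the equivalence, with a hypothetical helper name, purely for illustration:

// Hypothetical scalar model: reversing, inserting at LANE, then reversing
// again is the same as writing to the mirrored position N - 1 - LANE.
fn insert_lane_reversed<T: Copy, const N: usize>(mut v: [T; N], lane: usize, x: T) -> [T; N] {
    debug_assert!(lane < N);
    v[N - 1 - lane] = x;
    v
}
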
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52114,19 +53462,22 @@ pub fn vrev32q_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } +pub fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_u8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev32) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52135,19 +53486,26 @@ pub fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } +pub fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int16x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_f32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52156,19 +53514,22 @@ pub fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64_f32(a: float32x2_t) -> float32x2_t { - unsafe { simd_shuffle!(a, a, [1, 0]) } +pub fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 
3); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_p16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52177,19 +53538,26 @@ pub fn vrev64_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } +pub fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int16x8_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_p8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52198,19 +53566,22 @@ pub fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { - unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } +pub fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52219,19 +53590,26 @@ pub fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64_s16(a: int16x4_t) -> int16x4_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } +pub fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { + let b: int32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int32x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52240,19 +53618,22 @@ pub fn vrev64_s16(a: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64_s32(a: int32x2_t) -> int32x2_t { - unsafe { simd_shuffle!(a, a, [1, 0]) } +pub fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52261,19 +53642,26 @@ pub fn vrev64_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64_s8(a: int8x8_t) -> int8x8_t { - unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } +pub fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: int32x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52282,19 +53670,22 @@ pub fn vrev64_s8(a: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } +pub fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52303,19 +53694,26 @@ pub fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { - unsafe { simd_shuffle!(a, a, [1, 0]) } +pub fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { + let b: int64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: int64x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ 
-52324,19 +53722,22 @@ pub fn vrev64_u32(a: uint32x2_t) -> uint32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vrev64_u8(a: uint8x8_t) -> uint8x8_t {
-    unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }
+pub fn vset_lane_u8<const LANE: i32>(a: u8, b: uint8x8_t) -> uint8x8_t {
+    static_assert_uimm_bits!(LANE, 3);
+    unsafe { simd_insert!(b, LANE as u32, a) }
 }
-#[doc = "Reversing vector elements (swap endianness)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_f32)"]
+#[doc = "Insert vector element from another vector element"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(rev64)
+    assert_instr(nop, LANE = 0)
 )]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -52345,19 +53746,26 @@ pub fn vrev64_u8(a: uint8x8_t) -> uint8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vrev64q_f32(a: float32x4_t) -> float32x4_t {
-    unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) }
+pub fn vset_lane_u8<const LANE: i32>(a: u8, b: uint8x8_t) -> uint8x8_t {
+    static_assert_uimm_bits!(LANE, 3);
+    unsafe {
+        let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let ret_val: uint8x8_t = simd_insert!(b, LANE as u32, a);
+        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
+    }
 }
-#[doc = "Reversing vector elements (swap endianness)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_p16)"]
+#[doc = "Insert vector element from another vector element"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(rev64)
+    assert_instr(nop, LANE = 0)
 )]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -52366,19 +53774,22 @@ pub fn vrev64q_f32(a: float32x4_t) -> float32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t {
-    unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) }
+pub fn vsetq_lane_u8<const LANE: i32>(a: u8, b: uint8x16_t) -> uint8x16_t {
+    static_assert_uimm_bits!(LANE, 4);
+    unsafe { simd_insert!(b, LANE as u32, a) }
 }
-#[doc = "Reversing vector elements (swap endianness)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_p8)"]
+#[doc = "Insert vector element from another vector element"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)"]
 #[inline]
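
A usage sketch for this lane-insert family, under the assumption of an aarch64 little-endian target; the wrapper below is my example, not part of the patch. The lane index is a const generic, as in the `<const LANE: i32>` signatures above, and `#[rustc_legacy_const_generics(2)]` keeps the historical call form in which the lane is written as a trailing value argument; `static_assert_uimm_bits!` rejects out-of-range lanes at compile time.

// Sets lane 3 of an 8-lane u8 vector to 0xAA (illustrative wrapper).
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
fn set_lane_three(v: core::arch::aarch64::uint8x8_t) -> core::arch::aarch64::uint8x8_t {
    use core::arch::aarch64::vset_lane_u8;
    // Turbofish form; the legacy-const-generics rewrite also accepts
    // vset_lane_u8(0xAA, v, 3). LANE must be a constant in 0..=7.
    vset_lane_u8::<3>(0xAA, v)
}
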
+#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52387,19 +53798,31 @@ pub fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { - unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } +pub fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(LANE, 4); + unsafe { + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint8x16_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) + } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52408,19 +53831,22 @@ pub fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64q_s16(a: int16x8_t) -> int16x8_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } +pub fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52429,19 +53855,26 @@ pub fn vrev64q_s16(a: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64q_s32(a: int32x4_t) -> int32x4_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } +pub fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint16x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52450,19 +53883,22 @@ pub fn vrev64q_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64q_s8(a: int8x16_t) -> int8x16_t { - unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } +pub fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52471,19 +53907,26 @@ pub fn vrev64q_s8(a: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } +pub fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: uint16x8_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52492,19 +53935,22 @@ pub fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { - unsafe { simd_shuffle!(a, a, [1, 0, 3, 2]) } +pub fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reversing vector elements (swap endianness)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52513,63 +53959,78 @@ pub fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { - unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } +pub fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint32x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [1, 0]) + } } -#[doc = "Reverse elements in 64-bit doublewords"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_f16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)"] #[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrev64))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] -#[target_feature(enable = "neon,fp16")] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrev64_f16(a: float16x4_t) -> float16x4_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) } +pub fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Reverse elements in 64-bit doublewords"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_f16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrev64))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rev64) + assert_instr(nop, LANE = 0) )] -#[target_feature(enable = "neon,fp16")] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrev64q_f16(a: float16x8_t) -> float16x8_t { - unsafe { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } +pub fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: uint32x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52578,27 +54039,22 @@ pub fn vrev64q_f16(a: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i8")] - fn _vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - unsafe { _vrhadd_s8(a, b) } +pub fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Rounding halving add"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52607,27 +54063,26 @@ pub fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v16i8")] - fn _vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; +pub fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: uint64x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } - unsafe { _vrhaddq_s8(a, b) } } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52636,27 +54091,22 @@ pub fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i16")] - fn _vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - } - unsafe { _vrhadd_s16(a, b) } +pub fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52665,27 +54115,26 @@ pub fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i16")] - fn _vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; +pub fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x8_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vrhaddq_s16(a, b) } } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52694,27 +54143,22 @@ pub fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v2i32")] - fn _vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - } - unsafe { _vrhadd_s32(a, b) } +pub fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(LANE, 4); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srhadd) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52723,27 +54167,31 @@ pub fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srhadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i32")] - fn _vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; +pub fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(LANE, 4); + unsafe { + let b: poly8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly8x16_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!( + ret_val, + ret_val, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ) } - unsafe { _vrhaddq_s32(a, b) } } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52752,27 +54200,22 @@ pub fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i8")] - fn _vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - } - unsafe { _vrhadd_u8(a, b) } +pub fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52781,27 +54224,26 @@ pub fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v16i8")] - fn _vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; +pub fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(LANE, 2); + unsafe { + let b: poly16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let ret_val: poly16x4_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vrhaddq_u8(a, b) } } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52810,27 +54252,22 @@ pub fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v4i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i16")] - fn _vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - } - unsafe { _vrhadd_u16(a, b) } +pub fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52839,27 +54276,25 @@ pub fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v8i16" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i16")] - fn _vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; +pub fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(LANE, 3); + unsafe { + let b: poly16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: poly16x8_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } - unsafe { _vrhaddq_u16(a, b) } } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p64)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52868,27 +54303,21 @@ pub fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v2i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v2i32")] - fn _vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - } - unsafe { _vrhadd_u32(a, b) } +pub fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_t { + static_assert!(LANE == 0); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Rounding halving add"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urhadd) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52897,85 +54326,69 @@ pub fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urhadd.v4i32" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i32")] - fn _vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - } - unsafe { _vrhaddq_u32(a, b) } +pub fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t { + static_assert!(LANE == 0); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u64)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frintn) + assert_instr(nop, LANE = 0) )] -#[target_feature(enable = "neon,fp16")] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrndn_f16(a: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec", target_arch = "arm"), - link_name = "llvm.roundeven.v4f16" - )] - fn _vrndn_f16(a: float16x4_t) -> float16x4_t; - } - unsafe { _vrndn_f16(a) } +pub fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_t { + static_assert!(LANE == 0); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f16)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frintn) + assert_instr(nop, LANE = 0) )] -#[target_feature(enable = "neon,fp16")] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrndnq_f16(a: float16x8_t) -> float16x8_t { - unsafe extern 
"unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec", target_arch = "arm"), - link_name = "llvm.roundeven.v8f16" - )] - fn _vrndnq_f16(a: float16x8_t) -> float16x8_t; - } - unsafe { _vrndnq_f16(a) } +pub fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { simd_insert!(b, LANE as u32, a) } } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)"] +#[doc = "Insert vector element from another vector element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frintn) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -52984,632 +54397,807 @@ pub fn vrndnq_f16(a: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrndn_f32(a: float32x2_t) -> float32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec", target_arch = "arm"), - link_name = "llvm.roundeven.v2f32" - )] - fn _vrndn_f32(a: float32x2_t) -> float32x2_t; +pub fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { + static_assert_uimm_bits!(LANE, 1); + unsafe { + let b: poly64x2_t = simd_shuffle!(b, b, [1, 0]); + let ret_val: poly64x2_t = simd_insert!(b, LANE as u32, a); + simd_shuffle!(ret_val, ret_val, [1, 0]) } - unsafe { _vrndn_f32(a) } } -#[doc = "Floating-point round to integral, to nearest with ties to even"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)"] +#[doc = "SHA1 hash update accelerator, choose."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1c))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frintn) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrndnq_f32(a: float32x4_t) -> float32x4_t { +pub fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { #[cfg_attr( - any(target_arch = "aarch64", target_arch = 
"arm64ec", target_arch = "arm"), - link_name = "llvm.roundeven.v4f32" + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha1c" )] - fn _vrndnq_f32(a: float32x4_t) -> float32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")] + fn _vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; } - unsafe { _vrndnq_f32(a) } + unsafe { _vsha1cq_u32(hash_abcd, hash_e, wk) } } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)"] +#[doc = "SHA1 hash update accelerator, choose."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1c))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { +pub fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v8i8" + link_name = "llvm.aarch64.crypto.sha1c" )] - fn _vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")] + fn _vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; + } + unsafe { + let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [3, 2, 1, 0]); + let wk: uint32x4_t = simd_shuffle!(wk, wk, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsha1cq_u32(hash_abcd, hash_e, wk); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vrshl_s8(a, b) } } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)"] +#[doc = "SHA1 fixed rotate."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1h_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1h))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = 
"aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { +pub fn vsha1h_u32(hash_e: u32) -> u32 { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v16i8" + link_name = "llvm.aarch64.crypto.sha1h" )] - fn _vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1h")] + fn _vsha1h_u32(hash_e: u32) -> u32; } - unsafe { _vrshlq_s8(a, b) } + unsafe { _vsha1h_u32(hash_e) } } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)"] +#[doc = "SHA1 hash update accelerator, majority"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1m))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { +pub fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v4i16" + link_name = "llvm.aarch64.crypto.sha1m" )] - fn _vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")] + fn _vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; } - unsafe { _vrshl_s16(a, b) } + unsafe { _vsha1mq_u32(hash_abcd, hash_e, wk) } } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)"] +#[doc = "SHA1 hash update accelerator, majority"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1m))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { +pub fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v8i16" + link_name = "llvm.aarch64.crypto.sha1m" )] - fn _vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")] + fn _vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; + } + unsafe { + let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [3, 2, 1, 0]); + let wk: uint32x4_t = simd_shuffle!(wk, wk, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsha1mq_u32(hash_abcd, hash_e, wk); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vrshlq_s16(a, b) } } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)"] +#[doc = "SHA1 hash update accelerator, parity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1p))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { +pub fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v2i32" + link_name = "llvm.aarch64.crypto.sha1p" )] - fn _vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")] + fn _vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; } - unsafe { _vrshl_s32(a, b) } + unsafe { _vsha1pq_u32(hash_abcd, hash_e, wk) } } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)"] +#[doc = "SHA1 hash update accelerator, parity"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) -)] -#[cfg_attr( - not(target_arch = "arm"), 
- stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1p))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] +pub fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v4i32" + link_name = "llvm.aarch64.crypto.sha1p" )] - fn _vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")] + fn _vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; + } + unsafe { + let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [3, 2, 1, 0]); + let wk: uint32x4_t = simd_shuffle!(wk, wk, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsha1pq_u32(hash_abcd, hash_e, wk); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vrshlq_s32(a, b) } } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s64)"] +#[doc = "SHA1 schedule update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1su0))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { +pub fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v1i64" + link_name = "llvm.aarch64.crypto.sha1su0" )] - fn _vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")] + fn _vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t; } - unsafe { _vrshl_s64(a, b) } + unsafe { _vsha1su0q_u32(w0_3, w4_7, w8_11) } } -#[doc = "Signed rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)"] +#[doc = "SHA1 schedule update accelerator, first part."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1su0))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { +pub fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.srshl.v2i64" + link_name = "llvm.aarch64.crypto.sha1su0" )] - fn _vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")] + fn _vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t; + } + unsafe { + let w0_3: uint32x4_t = simd_shuffle!(w0_3, w0_3, [3, 2, 1, 0]); + let w4_7: uint32x4_t = simd_shuffle!(w4_7, w4_7, [3, 2, 1, 0]); + let w8_11: uint32x4_t = simd_shuffle!(w8_11, w8_11, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsha1su0q_u32(w0_3, w4_7, w8_11); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vrshlq_s64(a, b) } } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)"] +#[doc = "SHA1 schedule update accelerator, second part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1su1))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { +pub fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v8i8" + link_name = "llvm.aarch64.crypto.sha1su1" )] - fn _vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; + 
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")] + fn _vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t; } - unsafe { _vrshl_u8(a, b) } + unsafe { _vsha1su1q_u32(tw0_3, w12_15) } } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)"] +#[doc = "SHA1 schedule update accelerator, second part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha1su1))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { +pub fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v16i8" + link_name = "llvm.aarch64.crypto.sha1su1" )] - fn _vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su1")] + fn _vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t; + } + unsafe { + let tw0_3: uint32x4_t = simd_shuffle!(tw0_3, tw0_3, [3, 2, 1, 0]); + let w12_15: uint32x4_t = simd_shuffle!(w12_15, w12_15, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsha1su1q_u32(tw0_3, w12_15); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vrshlq_u8(a, b) } } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)"] +#[doc = "SHA1 schedule update accelerator, upper part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256h2))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { +pub fn vsha256h2q_u32(hash_abcd: 
uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v4i16" + link_name = "llvm.aarch64.crypto.sha256h2" )] - fn _vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")] + fn _vsha256h2q_u32( + hash_abcd: uint32x4_t, + hash_efgh: uint32x4_t, + wk: uint32x4_t, + ) -> uint32x4_t; } - unsafe { _vrshl_u16(a, b) } + unsafe { _vsha256h2q_u32(hash_abcd, hash_efgh, wk) } } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)"] +#[doc = "SHA256 hash update accelerator, upper part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256h2))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { +pub fn vsha256h2q_u32(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v8i16" + link_name = "llvm.aarch64.crypto.sha256h2" )] - fn _vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")] + fn _vsha256h2q_u32( + hash_abcd: uint32x4_t, + hash_efgh: uint32x4_t, + wk: uint32x4_t, + ) -> uint32x4_t; + } + unsafe { + let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [3, 2, 1, 0]); + let hash_efgh: uint32x4_t = simd_shuffle!(hash_efgh, hash_efgh, [3, 2, 1, 0]); + let wk: uint32x4_t = simd_shuffle!(wk, wk, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsha256h2q_u32(hash_abcd, hash_efgh, wk); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vrshlq_u16(a, b) } } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)"] +#[doc = "SHA256 hash update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test,
assert_instr(sha256h))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { +pub fn vsha256hq_u32(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v2i32" + link_name = "llvm.aarch64.crypto.sha256h" )] - fn _vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")] + fn _vsha256hq_u32( + hash_abcd: uint32x4_t, + hash_efgh: uint32x4_t, + wk: uint32x4_t, + ) -> uint32x4_t; } - unsafe { _vrshl_u32(a, b) } + unsafe { _vsha256hq_u32(hash_abcd, hash_efgh, wk) } } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)"] +#[doc = "SHA256 hash update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256h))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { +pub fn vsha256hq_u32(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v4i32" + link_name = "llvm.aarch64.crypto.sha256h" )] - fn _vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")] + fn _vsha256hq_u32( + hash_abcd: uint32x4_t, + hash_efgh: uint32x4_t, + wk: uint32x4_t, + ) -> uint32x4_t; + } + unsafe { + let hash_abcd: uint32x4_t = simd_shuffle!(hash_abcd, hash_abcd, [3, 2, 1, 0]); + let hash_efgh: uint32x4_t = simd_shuffle!(hash_efgh, hash_efgh, [3, 2, 1, 0]); + let wk: uint32x4_t = simd_shuffle!(wk, wk, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsha256hq_u32(hash_abcd, hash_efgh, wk); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vrshlq_u32(a, b) } } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u64)"] +#[doc = "SHA256 schedule update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256su0))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { +pub fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v1i64" + link_name = "llvm.aarch64.crypto.sha256su0" )] - fn _vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")] + fn _vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t; } - unsafe { _vrshl_u64(a, b) } + unsafe { _vsha256su0q_u32(w0_3, w4_7) } } -#[doc = "Unsigned rounding shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)"] +#[doc = "SHA256 schedule update accelerator, first part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256su0))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshl) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { +pub fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.urshl.v2i64" + link_name = "llvm.aarch64.crypto.sha256su0" )] - fn _vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")] + fn _vsha256su0q_u32(w0_3: uint32x4_t, w4_7: 
uint32x4_t) -> uint32x4_t; + } + unsafe { + let w0_3: uint32x4_t = simd_shuffle!(w0_3, w0_3, [3, 2, 1, 0]); + let w4_7: uint32x4_t = simd_shuffle!(w4_7, w4_7, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsha256su0q_u32(w0_3, w4_7); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } - unsafe { _vrshlq_u64(a, b) } } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)"] +#[doc = "SHA256 schedule update accelerator, second part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] +#[cfg(target_endian = "little")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256su1))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - vrshl_s8(a, vdup_n_s8(-N as _)) -} -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - vrshlq_s8(a, vdupq_n_s8(-N as _)) +pub fn vsha256su1q_u32(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha256su1" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")] + fn _vsha256su1q_u32(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) + -> uint32x4_t; + } + unsafe { _vsha256su1q_u32(tw0_3, w8_11, w12_15) } } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)"] +#[doc = "SHA256 schedule update accelerator, second part."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "sha2")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(test, assert_instr(sha256su1))] 
#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] -pub fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - vrshl_s16(a, vdup_n_s16(-N as _)) +pub fn vsha256su1q_u32(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.crypto.sha256su1" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")] + fn _vsha256su1q_u32(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) + -> uint32x4_t; + } + unsafe { + let tw0_3: uint32x4_t = simd_shuffle!(tw0_3, tw0_3, [3, 2, 1, 0]); + let w8_11: uint32x4_t = simd_shuffle!(w8_11, w8_11, [3, 2, 1, 0]); + let w12_15: uint32x4_t = simd_shuffle!(w12_15, w12_15, [3, 2, 1, 0]); + let ret_val: uint32x4_t = _vsha256su1q_u32(tw0_3, w8_11, w12_15); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) + } } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - vrshlq_s16(a, vdupq_n_s16(-N as _)) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftlins_v16i8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] + fn _vshiftlins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + unsafe { _vshiftlins_v16i8(a, b, const { int8x16_t([N as i8; 16]) }) } } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - vrshl_s32(a, vdup_n_s32(-N as _)) 
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftlins_v1i64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v1i64")] + fn _vshiftlins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t; + } + unsafe { _vshiftlins_v1i64(a, b, const { int64x1_t([N as i64; 1]) }) } } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftlins_v2i32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] + fn _vshiftlins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; + } + unsafe { _vshiftlins_v2i32(a, b, const { int32x2_t([N; 2]) }) } +} +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftlins_v2i64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] + fn _vshiftlins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + unsafe { _vshiftlins_v2i64(a, b, const { int64x2_t([N as i64; 2]) }) } +} +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftlins_v4i16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] + fn _vshiftlins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; + } + unsafe { _vshiftlins_v4i16(a, b, const { int16x4_t([N as i16; 4]) }) } +} +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftlins_v4i32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] + fn _vshiftlins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + unsafe { _vshiftlins_v4i32(a, b, const { int32x4_t([N; 4]) }) } +} +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftlins_v8i16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] + fn _vshiftlins_v8i16(a: int16x8_t, b: 
int16x8_t, c: int16x8_t) -> int16x8_t; + } + unsafe { _vshiftlins_v8i16(a, b, const { int16x8_t([N as i16; 8]) }) } +} +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftlins_v8i8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] + fn _vshiftlins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + } + unsafe { _vshiftlins_v8i8(a, b, const { int8x8_t([N as i8; 8]) }) } +} +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v16i8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftrins_v16i8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] + fn _vshiftrins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; + } + unsafe { _vshiftrins_v16i8(a, b, const { int8x16_t([-N as i8; 16]) }) } +} +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v1i64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftrins_v1i64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v1i64")] + fn _vshiftrins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t; + } + unsafe { _vshiftrins_v1i64(a, b, const { int64x1_t([-N as i64; 1]) }) } +} +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v2i32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftrins_v2i32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] + fn _vshiftrins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; + } + unsafe { _vshiftrins_v2i32(a, b, const { int32x2_t([-N; 2]) }) } +} +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v2i64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftrins_v2i64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] + fn _vshiftrins_v2i64(a: 
int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; + } + unsafe { _vshiftrins_v2i64(a, b, const { int64x2_t([-N as i64; 2]) }) } +} +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v4i16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftrins_v4i16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] + fn _vshiftrins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; + } + unsafe { _vshiftrins_v4i16(a, b, const { int16x4_t([-N as i16; 4]) }) } +} +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v4i32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftrins_v4i32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] + fn _vshiftrins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; + } + unsafe { _vshiftrins_v4i32(a, b, const { int32x4_t([-N; 4]) }) } +} +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v8i16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftrins_v8i16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] + fn _vshiftrins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; + } + unsafe { _vshiftrins_v8i16(a, b, const { int16x8_t([-N as i16; 8]) }) } +} +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v8i8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[rustc_legacy_const_generics(2)] +fn vshiftrins_v8i8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] + fn _vshiftrins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; + } + unsafe { _vshiftrins_v8i8(a, b, const { int8x8_t([-N as i8; 8]) }) } +} +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(shl, N = 2) )] 
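// A scalar sketch of the shift-and-insert semantics that the
// vshiftlins_*/vshiftrins_* helpers above delegate to LLVM: one
// llvm.arm.neon.vshiftins intrinsic serves both directions, with the
// right-insert helpers encoding their count as -N. Helper names below are
// ours, for exposition only; each models a single u8 lane.
fn sli_lane_u8(a: u8, b: u8, n: u32) -> u8 {
    // VSLI #n: bits of `b` shifted left by n; the low n bits of `a` survive.
    assert!(n < 8, "SLI immediate is 0..=7 for 8-bit lanes");
    (b << n) | (a & ((1u8 << n) - 1))
}

fn sri_lane_u8(a: u8, b: u8, n: u32) -> u8 {
    // VSRI #n: bits of `b` shifted right by n; the high n bits of `a`
    // survive. Widening to u16 keeps the architectural n == 8 case defined.
    assert!(n >= 1 && n <= 8, "SRI immediate is 1..=8 for 8-bit lanes");
    ((b as u16 >> n) as u8) | (a & (!(0xffu16 >> n) as u8))
}

fn main() {
    assert_eq!(sli_lane_u8(0b0000_0011, 0b0000_1111, 2), 0b0011_1111);
    assert_eq!(sri_lane_u8(0b1100_0000, 0b1111_0000, 2), 0b1111_1100);
}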
#[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -53620,19 +55208,19 @@ pub fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - vrshlq_s32(a, vdupq_n_s32(-N as _)) +pub fn vshl_n_s8(a: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shl(a, vdup_n_s8(N as _)) } } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s64)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -53643,19 +55231,19 @@ pub fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - vrshl_s64(a, vdup_n_s64(-N as _)) +pub fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shl(a, vdupq_n_s8(N as _)) } } -#[doc = "Signed rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srshr, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -53666,19 +55254,19 @@ pub fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - vrshlq_s64(a, vdupq_n_s64(-N as _)) +pub fn vshl_n_s16(a: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + unsafe { simd_shl(a, vdup_n_s16(N as _)) } } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -53689,19 +55277,19 @@ pub fn vrshrq_n_s64(a: int64x2_t) -> 
int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - vrshl_u8(a, vdup_n_s8(-N as _)) +pub fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { simd_shl(a, vdupq_n_s16(N as _)) } } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -53712,19 +55300,19 @@ pub fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - vrshlq_u8(a, vdupq_n_s8(-N as _)) +pub fn vshl_n_s32(a: int32x2_t) -> int32x2_t { + static_assert_uimm_bits!(N, 5); + unsafe { simd_shl(a, vdup_n_s32(N as _)) } } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -53735,19 +55323,19 @@ pub fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - vrshl_u16(a, vdup_n_s16(-N as _)) +pub fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert_uimm_bits!(N, 5); + unsafe { simd_shl(a, vdupq_n_s32(N as _)) } } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -53758,19 +55346,19 @@ pub fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") 
)] -pub fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - vrshlq_u16(a, vdupq_n_s16(-N as _)) +pub fn vshl_n_s64(a: int64x1_t) -> int64x1_t { + static_assert_uimm_bits!(N, 6); + unsafe { simd_shl(a, vdup_n_s64(N as _)) } } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -53781,19 +55369,19 @@ pub fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - vrshl_u32(a, vdup_n_s32(-N as _)) +pub fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { + static_assert_uimm_bits!(N, 6); + unsafe { simd_shl(a, vdupq_n_s64(N as _)) } } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -53804,19 +55392,19 @@ pub fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - vrshlq_u32(a, vdupq_n_s32(-N as _)) +pub fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shl(a, vdup_n_u8(N as _)) } } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u64)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -53827,19 +55415,19 @@ pub fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 
64); - vrshl_u64(a, vdup_n_s64(-N as _)) +pub fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + unsafe { simd_shl(a, vdupq_n_u8(N as _)) } } -#[doc = "Unsigned rounding shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(urshr, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -53850,124 +55438,19 @@ pub fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - vrshlq_u64(a, vdupq_n_s64(-N as _)) -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")] - fn _vrshrn_n_s16(a: int16x8_t, n: int16x8_t) -> int8x8_t; - } - unsafe { _vrshrn_n_s16(a, const { int16x8_t([-N as i16; 8]) }) } -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")] - fn _vrshrn_n_s32(a: int32x4_t, n: int32x4_t) -> int16x4_t; - } - unsafe { _vrshrn_n_s32(a, const { int32x4_t([-N; 4]) }) } -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vrshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")] - fn _vrshrn_n_s64(a: int64x2_t, n: int64x2_t) -> int32x2_t; - } - unsafe { _vrshrn_n_s64(a, const { int64x2_t([-N as i64; 2]) }) } -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] -#[inline] -#[target_feature(enable = 
"neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(rshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rshrn.v8i8" - )] - fn _vrshrn_n_s16(a: int16x8_t, n: i32) -> int8x8_t; - } - unsafe { _vrshrn_n_s16(a, N) } -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(rshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rshrn.v4i16" - )] - fn _vrshrn_n_s32(a: int32x4_t, n: i32) -> int16x4_t; - } - unsafe { _vrshrn_n_s32(a, N) } -} -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(rshrn, N = 2))] -#[rustc_legacy_const_generics(1)] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rshrn.v2i32" - )] - fn _vrshrn_n_s64(a: int64x2_t, n: i32) -> int32x2_t; - } - unsafe { _vrshrn_n_s64(a, N) } +pub fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + unsafe { simd_shl(a, vdup_n_u16(N as _)) } } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rshrn, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -53978,19 +55461,19 @@ pub fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe { transmute(vrshrn_n_s16::(transmute(a))) } +pub fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { simd_shl(a, vdupq_n_u16(N as _)) } } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rshrn, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -54001,19 +55484,19 @@ pub fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe { transmute(vrshrn_n_s32::(transmute(a))) } +pub fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { + static_assert_uimm_bits!(N, 5); + unsafe { simd_shl(a, vdup_n_u32(N as _)) } } -#[doc = "Rounding shift right narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rshrn, N = 2) + assert_instr(shl, N = 2) )] #[rustc_legacy_const_generics(1)] #[cfg_attr( @@ -54024,79 +55507,65 @@ pub fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe { transmute(vrshrn_n_s64::(transmute(a))) } +pub fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { + static_assert_uimm_bits!(N, 5); + unsafe { simd_shl(a, vdupq_n_u32(N as _)) } } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f16)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u64)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrte) + assert_instr(shl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrsqrte_f16(a: float16x4_t) -> float16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v4f16" - )] - fn _vrsqrte_f16(a: float16x4_t) -> float16x4_t; - } - unsafe { _vrsqrte_f16(a) } +pub fn vshl_n_u64(a: 
uint64x1_t) -> uint64x1_t { + static_assert_uimm_bits!(N, 6); + unsafe { simd_shl(a, vdup_n_u64(N as _)) } } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f16)"] +#[doc = "Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrte) + assert_instr(shl, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrsqrteq_f16(a: float16x8_t) -> float16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v8f16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v8f16" - )] - fn _vrsqrteq_f16(a: float16x8_t) -> float16x8_t; - } - unsafe { _vrsqrteq_f16(a) } +pub fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { + static_assert_uimm_bits!(N, 6); + unsafe { simd_shl(a, vdupq_n_u64(N as _)) } } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrte) + assert_instr(sshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -54106,26 +55575,26 @@ pub fn vrsqrteq_f16(a: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { +pub fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v2f32" + link_name = "llvm.aarch64.neon.sshl.v8i8" )] - fn _vrsqrte_f32(a: float32x2_t) -> float32x2_t; + fn _vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - unsafe { _vrsqrte_f32(a) } + unsafe { _vshl_s8(a, b) } } -#[doc = "Reciprocal square-root estimate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)"] #[inline] 
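// A scalar sketch of the register-form shift that the vshl_s*/vshlq_s*
// intrinsics above expose (VSHL on Arm, SSHL on AArch64), as we read the
// architecture: each lane of `b` is a signed count, negative counts shift
// right, and counts at or beyond the lane width behave as below. The
// out-of-range behaviour is our reading of the pseudocode, worth
// double-checking against the Arm ARM; the helper name is ours.
fn sshl_lane_i8(x: i8, n: i8) -> i8 {
    if n >= 8 {
        0 // every bit shifted out to the left
    } else if n >= 0 {
        x.wrapping_shl(n as u32)
    } else if n > -8 {
        x >> (-n as u32) // arithmetic right shift for signed lanes
    } else {
        x >> 7 // right shift by >= width leaves only sign bits
    }
}

fn main() {
    assert_eq!(sshl_lane_i8(3, 2), 12);
    assert_eq!(sshl_lane_i8(-16, -2), -4);
    assert_eq!(sshl_lane_i8(-1, -100), -1);
}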
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrte) + assert_instr(sshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -54135,26 +55604,26 @@ pub fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { +pub fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrte.v4f32" + link_name = "llvm.aarch64.neon.sshl.v16i8" )] - fn _vrsqrteq_f32(a: float32x4_t) -> float32x4_t; + fn _vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; } - unsafe { _vrsqrteq_f32(a) } + unsafe { _vshlq_s8(a, b) } } -#[doc = "Unsigned reciprocal square root estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursqrte) + assert_instr(sshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -54164,26 +55633,26 @@ pub fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { +pub fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ursqrte.v2i32" + link_name = "llvm.aarch64.neon.sshl.v4i16" )] - fn _vrsqrte_u32(a: uint32x2_t) -> uint32x2_t; + fn _vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; } - unsafe { _vrsqrte_u32(a) } + unsafe { _vshl_s16(a, b) } } -#[doc = "Unsigned reciprocal square root estimate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursqrte) + assert_instr(sshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -54193,86 +55662,84 @@ pub fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { +pub fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4i32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i16")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ursqrte.v4i32" + link_name = "llvm.aarch64.neon.sshl.v8i16" )] - fn _vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t; + fn _vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; } - unsafe { _vrsqrteq_u32(a) } + unsafe { _vshlq_s16(a, b) } } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f16)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrts) + assert_instr(sshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrsqrts_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { +pub fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v4f16" + link_name = "llvm.aarch64.neon.sshl.v2i32" )] - fn _vrsqrts_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t; + fn _vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; } - unsafe { _vrsqrts_f16(a, b) } + unsafe { _vshl_s32(a, b) } } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f16)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrts) + assert_instr(sshl) )] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vrsqrtsq_f16(a: 
float16x8_t, b: float16x8_t) -> float16x8_t { +pub fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v8f16")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v8f16" + link_name = "llvm.aarch64.neon.sshl.v4i32" )] - fn _vrsqrtsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t; + fn _vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; } - unsafe { _vrsqrtsq_f16(a, b) } + unsafe { _vshlq_s32(a, b) } } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrts) + assert_instr(sshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -54282,26 +55749,26 @@ pub fn vrsqrtsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { +pub fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v2f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v1i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v2f32" + link_name = "llvm.aarch64.neon.sshl.v1i64" )] - fn _vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; + fn _vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; } - unsafe { _vrsqrts_f32(a, b) } + unsafe { _vshl_s64(a, b) } } -#[doc = "Floating-point reciprocal square root step"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)"] +#[doc = "Signed Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(frsqrts) + assert_instr(sshl) )] #[cfg_attr( not(target_arch = "arm"), @@ -54311,28 +55778,27 @@ pub fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { +pub fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f32")] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.frsqrts.v4f32" 
+ link_name = "llvm.aarch64.neon.sshl.v2i64" )] - fn _vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; + fn _vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; } - unsafe { _vrsqrtsq_f32(a, b) } + unsafe { _vshlq_s64(a, b) } } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(ushl) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54341,21 +55807,27 @@ pub fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe { simd_add(a, vrshr_n_s8::(b)) } +pub fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v8i8" + )] + fn _vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; + } + unsafe { _vshl_u8(a, b) } } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(ushl) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54364,21 +55836,27 @@ pub fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - unsafe { simd_add(a, vrshrq_n_s8::(b)) } +pub fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v16i8" + )] + fn _vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; + } + unsafe { _vshlq_u8(a, b) } } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(ushl) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54387,21 +55865,27 @@ pub fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe { simd_add(a, vrshr_n_s16::(b)) } +pub fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v4i16" + )] + fn _vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; + } + unsafe { _vshl_u16(a, b) } } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(ushl) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54410,21 +55894,27 @@ pub fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - unsafe { simd_add(a, vrshrq_n_s16::(b)) } +pub fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v8i16" + )] + fn _vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; + } + unsafe { _vshlq_u16(a, b) } } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(ushl) )] 
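// Hypothetical usage sketch for the unsigned register shifts above
// (AArch64 only; on stable Rust these intrinsics are unsafe fns gated on
// the `neon` target feature). The point it illustrates: the count vector
// of vshl_u16 is *signed* even though the data is unsigned, so a single
// intrinsic expresses both shift directions.
#[cfg(target_arch = "aarch64")]
fn demo() {
    use core::arch::aarch64::*;
    unsafe {
        let a = vdup_n_u16(8);
        let left = vshl_u16(a, vdup_n_s16(2)); // each lane: 8 << 2 == 32
        let right = vshl_u16(a, vdup_n_s16(-2)); // each lane: 8 >> 2 == 2
        assert_eq!(vget_lane_u16::<0>(left), 32);
        assert_eq!(vget_lane_u16::<0>(right), 2);
    }
}

fn main() {
    #[cfg(target_arch = "aarch64")]
    demo();
}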
-#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54433,21 +55923,27 @@ pub fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe { simd_add(a, vrshr_n_s32::(b)) } +pub fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v2i32" + )] + fn _vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; + } + unsafe { _vshl_u32(a, b) } } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(ushl) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54456,21 +55952,27 @@ pub fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe { simd_add(a, vrshrq_n_s32::(b)) } +pub fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v4i32" + )] + fn _vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; + } + unsafe { _vshlq_u32(a, b) } } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s64)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(ushl) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54479,21 +55981,27 @@ pub fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - unsafe { simd_add(a, vrshr_n_s64::(b)) } +pub fn vshl_u64(a: uint64x1_t, b: 
int64x1_t) -> uint64x1_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v1i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v1i64" + )] + fn _vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; + } + unsafe { _vshl_u64(a, b) } } -#[doc = "Signed rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)"] +#[doc = "Unsigned Shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(srsra, N = 2) + assert_instr(ushl) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54502,21 +56010,28 @@ pub fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe { simd_add(a, vrshrq_n_s64::(b)) } +pub fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")] + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.ushl.v2i64" + )] + fn _vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; + } + unsafe { _vshlq_u64(a, b) } } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(sshll, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54525,21 +56040,21 @@ pub fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe { simd_add(a, vrshr_n_u8::(b)) } +pub fn vshll_n_s16(a: int16x4_t) -> int32x4_t { + static_assert!(N >= 0 && N <= 16); + unsafe { simd_shl(simd_cast(a), vdupq_n_s32(N as _)) } } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)"] +#[doc = "Signed shift left long"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ursra, N = 2)
+    assert_instr(sshll, N = 2)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -54548,21 +56063,21 @@ pub fn vrsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vrsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
-    static_assert!(N >= 1 && N <= 8);
-    unsafe { simd_add(a, vrshrq_n_u8::<N>(b)) }
+pub fn vshll_n_s32<const N: i32>(a: int32x2_t) -> int64x2_t {
+    static_assert!(N >= 0 && N <= 32);
+    unsafe { simd_shl(simd_cast(a), vdupq_n_s64(N as _)) }
 }
-#[doc = "Unsigned rounding shift right and accumulate"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)"]
+#[doc = "Signed shift left long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ursra, N = 2)
+    assert_instr(sshll, N = 2)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -54571,21 +56086,21 @@ pub fn vrsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vrsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
-    static_assert!(N >= 1 && N <= 16);
-    unsafe { simd_add(a, vrshr_n_u16::<N>(b)) }
+pub fn vshll_n_s8<const N: i32>(a: int8x8_t) -> int16x8_t {
+    static_assert!(N >= 0 && N <= 8);
+    unsafe { simd_shl(simd_cast(a), vdupq_n_s16(N as _)) }
 }
-#[doc = "Unsigned rounding shift right and accumulate"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)"]
+#[doc = "Unsigned shift left long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ursra, N = 2)
+    assert_instr(ushll, N = 2)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -54594,21 +56109,21 @@ pub fn vrsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vrsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
-    static_assert!(N >= 1 && N <= 16);
-    unsafe { simd_add(a, vrshrq_n_u16::<N>(b)) }
+pub fn vshll_n_u16<const N: i32>(a: uint16x4_t) -> uint32x4_t {
+    static_assert!(N >= 0 && N <= 16);
+    unsafe { simd_shl(simd_cast(a), vdupq_n_u32(N as _)) }
 }
-#[doc = "Unsigned rounding shift right and accumulate"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)"]
+#[doc = "Unsigned shift left long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ursra, N = 2)
+    assert_instr(ushll, N = 2)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -54617,21 +56132,21 @@ pub fn vrsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vrsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
-    static_assert!(N >= 1 && N <= 32);
-    unsafe { simd_add(a, vrshr_n_u32::<N>(b)) }
+pub fn vshll_n_u32<const N: i32>(a: uint32x2_t) -> uint64x2_t {
+    static_assert!(N >= 0 && N <= 32);
+    unsafe { simd_shl(simd_cast(a), vdupq_n_u64(N as _)) }
 }
-#[doc = "Unsigned rounding shift right and accumulate"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)"]
+#[doc = "Unsigned shift left long"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ursra, N = 2)
+    assert_instr(ushll, N = 2)
 )]
-#[rustc_legacy_const_generics(2)]
+#[rustc_legacy_const_generics(1)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -54640,21 +56155,21 @@ pub fn vrsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vrsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
-    static_assert!(N >= 1 && N <= 32);
-    unsafe { simd_add(a, vrshrq_n_u32::<N>(b)) }
+pub fn vshll_n_u8<const N: i32>(a: uint8x8_t) -> uint16x8_t {
+    static_assert!(N >= 0 && N <= 8);
+    unsafe { simd_shl(simd_cast(a), vdupq_n_u16(N as _)) }
 }
-#[doc = "Unsigned rounding shift right and accumulate"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u64)"]
+#[doc = "Shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"),
assert_instr("vshr.s8", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(sshr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54663,21 +56178,22 @@ pub fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(N >= 1 && N <= 64); - unsafe { simd_add(a, vrshr_n_u64::(b)) } +pub fn vshr_n_s8(a: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + let n: i32 = if N == 8 { 7 } else { N }; + unsafe { simd_shr(a, vdup_n_s8(n as _)) } } -#[doc = "Unsigned rounding shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ursra, N = 2) + assert_instr(sshr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54686,20 +56202,22 @@ pub fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe { simd_add(a, vrshrq_n_u64::(b)) } +pub fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + let n: i32 = if N == 8 { 7 } else { N }; + unsafe { simd_shr(a, vdupq_n_s8(n as _)) } } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(sshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54708,27 +56226,22 @@ pub fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v8i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rsubhn.v8i8" - )] - fn _vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> 
int8x8_t; - } - unsafe { _vrsubhn_s16(a, b) } +pub fn vshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + let n: i32 = if N == 16 { 15 } else { N }; + unsafe { simd_shr(a, vdup_n_s16(n as _)) } } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(sshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54737,27 +56250,22 @@ pub fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rsubhn.v4i16" )] - fn _vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; - } - unsafe { _vrsubhn_s32(a, b) } +pub fn vshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + let n: i32 = if N == 16 { 15 } else { N }; + unsafe { simd_shr(a, vdupq_n_s16(n as _)) } } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(sshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54766,27 +56274,22 @@ pub fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.rsubhn.v2i32" )] - fn _vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; - } - unsafe { _vrsubhn_s64(a, b) } +pub fn vshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + let n: i32 = if N == 32 { 31 } else { N }; + unsafe { simd_shr(a, vdup_n_s32(n as _)) } } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] +#[doc = "Shift right"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(sshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54795,19 +56298,22 @@ pub fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - unsafe { transmute(vrsubhn_s16(transmute(a), transmute(b))) } +pub fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + let n: i32 = if N == 32 { 31 } else { N }; + unsafe { simd_shr(a, vdupq_n_s32(n as _)) } } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(sshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54816,19 +56322,22 @@ pub fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - unsafe { transmute(vrsubhn_s32(transmute(a), transmute(b))) } +pub fn vshr_n_s64(a: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { 63 } else { N }; + unsafe { simd_shr(a, vdup_n_s64(n as _)) } } -#[doc = "Rounding subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(rsubhn) + assert_instr(sshr, N = 2) )] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54837,54 +56346,22 @@ pub fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - unsafe { transmute(vrsubhn_s64(transmute(a), 
transmute(b))) } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vset_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> float16x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { simd_insert!(b, LANE as u32, a) } -} -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vsetq_lane_f16<const LANE: i32>(a: f16, b: float16x8_t) -> float16x8_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { 63 } else { N }; + unsafe { simd_shr(a, vdupq_n_s64(n as _)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ushr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54893,21 +56370,26 @@ pub fn vsetq_lane_f16<const LANE: i32>(a: f16, b: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vset_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> float32x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + let n: i32 = if N == 8 { + return vdup_n_u8(0); + } else { + N + }; + unsafe { simd_shr(a, vdup_n_u8(n as _)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable =
"v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ushr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54916,21 +56398,26 @@ pub fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + let n: i32 = if N == 8 { + return vdupq_n_u8(0); + } else { + N + }; + unsafe { simd_shr(a, vdupq_n_u8(n as _)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ushr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54939,21 +56426,26 @@ pub fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + let n: i32 = if N == 16 { + return vdup_n_u16(0); + } else { + N + }; + unsafe { simd_shr(a, vdup_n_u16(n as _)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ushr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54962,21 +56454,26 @@ pub fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { - static_assert_uimm_bits!(LANE, 4); - unsafe { 
simd_insert!(b, LANE as u32, a) } +pub fn vshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + let n: i32 = if N == 16 { + return vdupq_n_u16(0); + } else { + N + }; + unsafe { simd_shr(a, vdupq_n_u16(n as _)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ushr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -54985,21 +56482,26 @@ pub fn vsetq_lane_s8<const LANE: i32>(a: i8, b: int8x16_t) -> int8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vset_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + let n: i32 = if N == 32 { + return vdup_n_u32(0); + } else { + N + }; + unsafe { simd_shr(a, vdup_n_u32(n as _)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ushr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -55008,21 +56510,26 @@ pub fn vset_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsetq_lane_s16<const LANE: i32>(a: i16, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + let n: i32 = if N == 32 { + return vdupq_n_u32(0); + } else { + N + }; + unsafe { simd_shr(a, vdupq_n_u32(n as _)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ushr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -55031,21 +56538,26 @@ pub fn vsetq_lane_s16<const LANE: i32>(a: i16, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vset_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { + return vdup_n_u64(0); + } else { + N + }; + unsafe { simd_shr(a, vdup_n_u64(n as _)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)"] +#[doc = "Shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ushr, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -55054,21 +56566,26 @@ pub fn vset_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsetq_lane_s32<const LANE: i32>(a: i32, b: int32x4_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + let n: i32 = if N == 64 { + return vdupq_n_u64(0); + } else { + N + }; + unsafe { simd_shr(a, vdupq_n_u64(n as _)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(shrn, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -55077,21 +56594,21 @@ pub fn vsetq_lane_s32<const LANE: i32>(a: i32, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsetq_lane_s64<const LANE: i32>(a: i64, b: int64x2_t) -> int64x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshrn_n_s16<const N: i32>(a: int16x8_t)
-> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe { simd_cast(simd_shr(a, vdupq_n_s16(N as _))) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(shrn, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -55100,21 +56617,21 @@ pub fn vsetq_lane_s64<const LANE: i32>(a: i64, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vset_lane_u8<const LANE: i32>(a: u8, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe { simd_cast(simd_shr(a, vdupq_n_s32(N as _))) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(shrn, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -55123,21 +56640,21 @@ pub fn vset_lane_u8<const LANE: i32>(a: u8, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsetq_lane_u8<const LANE: i32>(a: u8, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(LANE, 4); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe { simd_cast(simd_shr(a, vdupq_n_s64(N as _))) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(shrn, N = 2) )]
-#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -55146,21 +56663,21 @@ pub fn vsetq_lane_u8<const LANE: i32>(a: u8, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vset_lane_u16<const LANE: i32>(a: u16, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe { simd_cast(simd_shr(a, vdupq_n_u16(N as _))) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(shrn, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -55169,21 +56686,21 @@ pub fn vset_lane_u16<const LANE: i32>(a: u16, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsetq_lane_u16<const LANE: i32>(a: u16, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe { simd_cast(simd_shr(a, vdupq_n_u32(N as _))) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)"] +#[doc = "Shift right narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(shrn, N = 2) )] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -55192,111 +56709,282 @@ pub fn vsetq_lane_u16<const LANE: i32>(a: u16, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vset_lane_u32<const LANE: i32>(a: u32, b: uint32x2_t) -> uint32x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe { simd_cast(simd_shr(a, vdupq_n_u64(N as _))) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)"] +#[doc = "Shift Left and
Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_uimm_bits!(N, 3); + vshiftlins_v8i8::(a, b) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert_uimm_bits!(N, 3); + vshiftlins_v16i8::(a, b) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", 
- unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vset_lane_p8<const LANE: i32>(a: p8, b: poly8x8_t) -> poly8x8_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_uimm_bits!(N, 4); + vshiftlins_v4i16::<N>(a, b) } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)"] +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") +pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_uimm_bits!(N, 4); + vshiftlins_v8i16::<N>(a, b) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 0 && N <= 31); + vshiftlins_v2i32::<N>(a, b) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 0 && N <= 31); + vshiftlins_v4i32::<N>(a, b) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N >= 0 && N <= 63); + vshiftlins_v1i64::<N>(a, b) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn
vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 0 && N <= 63); + vshiftlins_v2i64::<N>(a, b) +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { transmute(vshiftlins_v8i8::<N>(transmute(a), transmute(b))) } +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_uimm_bits!(N, 3); + unsafe { transmute(vshiftlins_v16i8::<N>(transmute(a), transmute(b))) } +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_uimm_bits!(N, 4); + unsafe { transmute(vshiftlins_v4i16::<N>(transmute(a), transmute(b))) } +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { transmute(vshiftlins_v8i16::<N>(transmute(a), transmute(b))) } +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 0 && N <= 31); + unsafe { transmute(vshiftlins_v2i32::<N>(transmute(a), transmute(b))) } +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsliq_n_u32<const N: i32>(a:
uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 0 && N <= 31); + unsafe { transmute(vshiftlins_v4i32::<N>(transmute(a), transmute(b))) } +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 0 && N <= 63); + unsafe { transmute(vshiftlins_v1i64::<N>(transmute(a), transmute(b))) } +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 0 && N <= 63); + unsafe { transmute(vshiftlins_v2i64::<N>(transmute(a), transmute(b))) } +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_uimm_bits!(N, 3); + unsafe { transmute(vshiftlins_v8i8::<N>(transmute(a), transmute(b))) } +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_uimm_bits!(N, 3); + unsafe { transmute(vshiftlins_v16i8::<N>(transmute(a), transmute(b))) } +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_uimm_bits!(N, 4); + unsafe { transmute(vshiftlins_v4i16::<N>(transmute(a), transmute(b))) } +} +#[doc = "Shift Left and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))]
+#[rustc_legacy_const_generics(2)] +pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_uimm_bits!(N, 4); + unsafe { transmute(vshiftlins_v8i16::<N>(transmute(a), transmute(b))) } +} +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsetq_lane_p8<const LANE: i32>(a: p8, b: poly8x16_t) -> poly8x16_t { - static_assert_uimm_bits!(LANE, 4); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe { simd_add(a, vshr_n_s8::<N>(b)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ssra, N = 2) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -55307,19 +56995,19 @@ pub fn vsetq_lane_p8<const LANE: i32>(a: p8, b: poly8x16_t) -> poly8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vset_lane_p16<const LANE: i32>(a: p16, b: poly16x4_t) -> poly16x4_t { - static_assert_uimm_bits!(LANE, 2); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(N >= 1 && N <= 8); + unsafe { simd_add(a, vshrq_n_s8::<N>(b)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ssra, N = 2) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -55330,19 +57018,19 @@ pub fn vset_lane_p16<const LANE: i32>(a: p16, b: poly16x4_t) -> poly16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsetq_lane_p16<const LANE: i32>(a: p16, b: poly16x8_t) -> poly16x8_t { - static_assert_uimm_bits!(LANE, 3); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vsra_n_s16<const N: i32>(a:
int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe { simd_add(a, vshr_n_s16::<N>(b)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p64)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ssra, N = 2) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -55353,19 +57041,19 @@ pub fn vsetq_lane_p16<const LANE: i32>(a: p16, b: poly16x8_t) -> poly16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vset_lane_p64<const LANE: i32>(a: p64, b: poly64x1_t) -> poly64x1_t { - static_assert!(LANE == 0); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(N >= 1 && N <= 16); + unsafe { simd_add(a, vshrq_n_s16::<N>(b)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s64)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ssra, N = 2) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -55376,19 +57064,19 @@ pub fn vset_lane_p64<const LANE: i32>(a: p64, b: poly64x1_t) -> poly64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vset_lane_s64<const LANE: i32>(a: i64, b: int64x1_t) -> int64x1_t { - static_assert!(LANE == 0); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe { simd_add(a, vshr_n_s32::<N>(b)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u64)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ssra, N = 2) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -55399,19 +57087,19 @@ pub fn vset_lane_s64<const LANE: i32>(a: i64, b: int64x1_t) ->
int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vset_lane_u64<const LANE: i32>(a: u64, b: uint64x1_t) -> uint64x1_t { - static_assert!(LANE == 0); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe { simd_add(a, vshrq_n_s32::<N>(b)) } } -#[doc = "Insert vector element from another vector element"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s64)"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(ssra, N = 2) )] #[rustc_legacy_const_generics(2)] #[cfg_attr( @@ -55422,1451 +57110,1338 @@ pub fn vset_lane_u64<const LANE: i32>(a: u64, b: uint64x1_t) -> uint64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsetq_lane_p64<const LANE: i32>(a: p64, b: poly64x2_t) -> poly64x2_t { - static_assert_uimm_bits!(LANE, 1); - unsafe { simd_insert!(b, LANE as u32, a) } +pub fn vsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N >= 1 && N <= 64); + unsafe { simd_add(a, vshr_n_s64::<N>(b)) } } -#[doc = "SHA1 hash update accelerator, choose."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)"] +#[doc = "Signed shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)"] #[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1c))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1c" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1c")] - fn _vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; - } - unsafe { _vsha1cq_u32(hash_abcd, hash_e, wk) } -} -#[doc = "SHA1 fixed rotate."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1h_u32)"] -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1h))] #[cfg_attr(
target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") -)] -pub fn vsha1h_u32(hash_e: u32) -> u32 { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1h" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1h")] - fn _vsha1h_u32(hash_e: u32) -> u32; - } - unsafe { _vsha1h_u32(hash_e) } +pub fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N >= 1 && N <= 64); + unsafe { simd_add(a, vshrq_n_s64::(b)) } } -#[doc = "SHA1 hash update accelerator, majority"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)"] +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)"] #[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1m))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1m" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1m")] - fn _vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; - } - unsafe { _vsha1mq_u32(hash_abcd, hash_e, wk) } -} -#[doc = "SHA1 hash update accelerator, parity"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)"] -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1p))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] +pub fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(N >= 1 && N <= 8); + unsafe { simd_add(a, vshr_n_u8::(b)) } +} +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - 
any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1p" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1p")] - fn _vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t; - } - unsafe { _vsha1pq_u32(hash_abcd, hash_e, wk) } -} -#[doc = "SHA1 schedule update accelerator, first part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)"] -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1su0))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] +pub fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(N >= 1 && N <= 8); + unsafe { simd_add(a, vshrq_n_u8::(b)) } +} +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1su0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha1su0")] - fn _vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t; - } - unsafe { _vsha1su0q_u32(w0_3, w4_7, w8_11) } -} -#[doc = "SHA1 schedule update accelerator, second part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)"] -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha1su1))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] +pub fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(N >= 1 && N <= 16); + unsafe { simd_add(a, vshr_n_u16::(b)) } +} +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha1su1" - )] - #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.sha1su1")] - fn _vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t; - } - unsafe { _vsha1su1q_u32(tw0_3, w12_15) } -} -#[doc = "SHA1 schedule update accelerator, upper part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)"] -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256h2))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] +pub fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(N >= 1 && N <= 16); + unsafe { simd_add(a, vshrq_n_u16::(b)) } +} +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub fn vsha256h2q_u32(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256h2" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h2")] - fn _vsha256h2q_u32( - hash_abcd: uint32x4_t, - hash_efgh: uint32x4_t, - wk: uint32x4_t, - ) -> uint32x4_t; - } - unsafe { _vsha256h2q_u32(hash_abcd, hash_efgh, wk) } -} -#[doc = "SHA1 schedule update accelerator, first part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)"] -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256h))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] +pub fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N >= 1 && N <= 32); + unsafe { simd_add(a, vshr_n_u32::(b)) } +} +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub fn vsha256hq_u32(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256h" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256h")] - fn _vsha256hq_u32( - hash_abcd: uint32x4_t, - hash_efgh: uint32x4_t, - wk: uint32x4_t, - ) -> 
uint32x4_t; - } - unsafe { _vsha256hq_u32(hash_abcd, hash_efgh, wk) } -} -#[doc = "SHA256 schedule update accelerator, first part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)"] -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256su0))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] +pub fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N >= 1 && N <= 32); + unsafe { simd_add(a, vshrq_n_u32::(b)) } +} +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256su0" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su0")] - fn _vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t; - } - unsafe { _vsha256su0q_u32(w0_3, w4_7) } -} -#[doc = "SHA256 schedule update accelerator, second part."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)"] -#[inline] -#[target_feature(enable = "sha2")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(test, assert_instr(sha256su1))] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] +pub fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N >= 1 && N <= 64); + unsafe { simd_add(a, vshr_n_u64::(b)) } +} +#[doc = "Unsigned shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usra, N = 2) +)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub fn vsha256su1q_u32(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.crypto.sha256su1" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.sha256su1")] - fn _vsha256su1q_u32(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) - -> uint32x4_t; - } - unsafe { _vsha256su1q_u32(tw0_3, w8_11, w12_15) } +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
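The new `vsra_n_*`/`vsraq_n_*` bodies above compose two existing intrinsics instead of binding a dedicated LLVM intrinsic: each lane computes `a + (b >> N)`, with `N` constrained by the `static_assert!`. A minimal scalar sketch of that semantics (illustrative names, not part of the diff):

```rust
// Scalar model of the lane-wise "shift right and accumulate" above:
// vsraq_n_s32 computes a + (b >> N) per lane, with N in 1..=32.
fn vsra_n_s32_model<const N: i32>(a: [i32; 4], b: [i32; 4]) -> [i32; 4] {
    assert!(N >= 1 && N <= 32);
    // Rust rejects shifting an i32 by 32, while the instruction's
    // arithmetic shift by the lane width just replicates the sign bit,
    // which is the same result as shifting by 31.
    let shift: u32 = if N == 32 { 31 } else { N as u32 };
    let mut out = [0i32; 4];
    for i in 0..4 {
        out[i] = a[i].wrapping_add(b[i] >> shift);
    }
    out
}

fn main() {
    // 8 >> 2 = 2, accumulated onto 1, gives 3 in every lane.
    assert_eq!(vsra_n_s32_model::<2>([1; 4], [8; 4]), [3; 4]);
}
```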
"stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N >= 1 && N <= 64); + unsafe { simd_add(a, vshrq_n_u64::(b)) } } +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] #[inline] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftlins_v16i8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] - fn _vshiftlins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - unsafe { _vshiftlins_v16i8(a, b, const { int8x16_t([N as i8; 16]) }) } +pub fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert!(1 <= N && N <= 8); + vshiftrins_v8i8::(a, b) } +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] #[inline] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftlins_v1i64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v1i64")] - fn _vshiftlins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t; - } - unsafe { _vshiftlins_v1i64(a, b, const { int64x1_t([N as i64; 1]) }) } +pub fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert!(1 <= N && N <= 8); + vshiftrins_v16i8::(a, b) } +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] #[inline] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftlins_v2i32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] - fn _vshiftlins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - } - unsafe { _vshiftlins_v2i32(a, b, const { int32x2_t([N; 2]) }) } +pub fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert!(1 <= N && N <= 16); + vshiftrins_v4i16::(a, b) } +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] #[inline] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftlins_v2i64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] - fn _vshiftlins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - unsafe { _vshiftlins_v2i64(a, b, const { int64x2_t([N as i64; 2]) }) } +pub fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert!(1 <= N && N <= 16); + vshiftrins_v8i16::(a, b) } +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] #[inline] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftlins_v4i16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] - fn _vshiftlins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - } - unsafe { _vshiftlins_v4i16(a, b, const { int16x4_t([N as i16; 4]) }) } +pub fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(1 <= N && N <= 32); + vshiftrins_v2i32::(a, b) } +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] #[inline] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftlins_v4i32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] - fn _vshiftlins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - unsafe { _vshiftlins_v4i32(a, b, const { int32x4_t([N; 4]) }) } +pub fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(1 <= N && N <= 32); + vshiftrins_v4i32::(a, b) } +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] #[inline] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftlins_v8i16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] - fn _vshiftlins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - unsafe { _vshiftlins_v8i16(a, b, const { int16x8_t([N as i16; 8]) }) } +pub fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(1 <= N && N <= 64); + vshiftrins_v1i64::(a, b) } +#[doc = "Shift Right and Insert (immediate)"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] #[inline] -#[target_feature(enable = "neon")] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftlins_v8i8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] - fn _vshiftlins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - } - unsafe { _vshiftlins_v8i8(a, b, const { int8x8_t([N as i8; 8]) }) } +pub fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(1 <= N && N <= 64); + vshiftrins_v2i64::(a, b) } #[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v16i8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftrins_v16i8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v16i8")] - fn _vshiftrins_v16i8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - } - unsafe { _vshiftrins_v16i8(a, b, const { int8x16_t([-N as i8; 16]) }) } +pub fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert!(1 <= N && N <= 8); + unsafe { transmute(vshiftrins_v8i8::(transmute(a), transmute(b))) } } #[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v1i64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftrins_v1i64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v1i64")] - fn _vshiftrins_v1i64(a: int64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t; - } - unsafe { _vshiftrins_v1i64(a, b, const { int64x1_t([-N as i64; 1]) }) } +pub fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert!(1 <= N && N <= 8); + unsafe { transmute(vshiftrins_v16i8::(transmute(a), transmute(b))) } } #[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v2i32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = 
"arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftrins_v2i32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i32")] - fn _vshiftrins_v2i32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - } - unsafe { _vshiftrins_v2i32(a, b, const { int32x2_t([-N; 2]) }) } +pub fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert!(1 <= N && N <= 16); + unsafe { transmute(vshiftrins_v4i16::(transmute(a), transmute(b))) } } #[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v2i64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftrins_v2i64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v2i64")] - fn _vshiftrins_v2i64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - } - unsafe { _vshiftrins_v2i64(a, b, const { int64x2_t([-N as i64; 2]) }) } +pub fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert!(1 <= N && N <= 16); + unsafe { transmute(vshiftrins_v8i16::(transmute(a), transmute(b))) } } #[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v4i16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftrins_v4i16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i16")] - fn _vshiftrins_v4i16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - } - unsafe { _vshiftrins_v4i16(a, b, const { int16x4_t([-N as i16; 4]) }) } +pub fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(1 <= N && N <= 32); + unsafe { transmute(vshiftrins_v2i32::(transmute(a), transmute(b))) } } #[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v4i32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] 
#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftrins_v4i32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v4i32")] - fn _vshiftrins_v4i32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - } - unsafe { _vshiftrins_v4i32(a, b, const { int32x4_t([-N; 4]) }) } +pub fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(1 <= N && N <= 32); + unsafe { transmute(vshiftrins_v4i32::(transmute(a), transmute(b))) } } #[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v8i16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftrins_v8i16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i16")] - fn _vshiftrins_v8i16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - } - unsafe { _vshiftrins_v8i16(a, b, const { int16x8_t([-N as i16; 8]) }) } +pub fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(1 <= N && N <= 64); + unsafe { transmute(vshiftrins_v1i64::(transmute(a), transmute(b))) } } #[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v8i8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] -fn vshiftrins_v8i8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftins.v8i8")] - fn _vshiftrins_v8i8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - } - unsafe { _vshiftrins_v8i8(a, b, const { int8x8_t([-N as i8; 8]) }) } +pub fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(1 <= N && N <= 64); + unsafe { transmute(vshiftrins_v2i64::(transmute(a), transmute(b))) } } -#[doc = "Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)"] +#[doc = "Shift Right and Insert (immediate)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(shl, N = 2) 
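The unsigned (and, just below, polynomial) `vsri` variants reuse the signed `vshiftrins_*` helpers through `transmute`, which is sound because shift-right-and-insert only moves bits and never interprets lane values numerically. A scalar sketch of that reinterpretation argument, using `as` casts in place of `transmute` (illustrative, not part of the diff):

```rust
// Bit insertion computed on the signed reinterpretation of a u8 lane
// produces the same bit pattern as the unsigned computation would.
fn insert_bits_i8<const N: u32>(a: i8, b: i8) -> i8 {
    assert!(N >= 1 && N < 8);
    let kept = (!(0xFFu8 >> N)) as i8; // top N bits kept from `a`
    (a & kept) | (((b as u8) >> N) as i8)
}

fn main() {
    let (a, b) = (0xABu8, 0xCDu8);
    let via_signed = insert_bits_i8::<4>(a as i8, b as i8) as u8;
    assert_eq!(via_signed, 0xAC); // identical bits either way
}
```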
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)"]
+#[doc = "Shift Right and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shl(a, vdup_n_s8(N as _)) }
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))]
+#[rustc_legacy_const_generics(2)]
+pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
+    static_assert!(1 <= N && N <= 8);
+    unsafe { transmute(vshiftrins_v8i8::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)"]
+#[doc = "Shift Right and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shl(a, vdupq_n_s8(N as _)) }
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))]
+#[rustc_legacy_const_generics(2)]
+pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
+    static_assert!(1 <= N && N <= 8);
+    unsafe { transmute(vshiftrins_v16i8::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)"]
+#[doc = "Shift Right and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe { simd_shl(a, vdup_n_s16(N as _)) }
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))]
+#[rustc_legacy_const_generics(2)]
+pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
+    static_assert!(1 <= N && N <= 16);
+    unsafe { transmute(vshiftrins_v4i16::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)"]
+#[doc = "Shift Right and Insert (immediate)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe { simd_shl(a, vdupq_n_s16(N as _)) }
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))]
+#[rustc_legacy_const_generics(2)]
+pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
+    static_assert!(1 <= N && N <= 16);
+    unsafe { transmute(vshiftrins_v8i16::<N>(transmute(a), transmute(b))) }
 }
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
-    static_assert_uimm_bits!(N, 5);
-    unsafe { simd_shl(a, vdup_n_s32(N as _)) }
-}
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
-    static_assert_uimm_bits!(N, 5);
-    unsafe { simd_shl(a, vdupq_n_s32(N as _)) }
-}
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s64)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
-    static_assert_uimm_bits!(N, 6);
-    unsafe { simd_shl(a, vdup_n_s64(N as _)) }
-}
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
-    static_assert_uimm_bits!(N, 6);
-    unsafe { simd_shl(a, vdupq_n_s64(N as _)) }
-}
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shl(a, vdup_n_u8(N as _)) }
-}
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { simd_shl(a, vdupq_n_u8(N as _)) }
-}
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe { simd_shl(a, vdup_n_u16(N as _)) }
-}
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe { simd_shl(a, vdupq_n_u16(N as _)) }
-}
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
-    static_assert_uimm_bits!(N, 5);
-    unsafe { simd_shl(a, vdup_n_u32(N as _)) }
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))]
+pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
+    vst1_v4f16(
+        ptr as *const i8,
+        transmute(a),
+        crate::mem::align_of::<f16>() as i32,
+    )
 }
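The removed `vshl_n_*` functions above were plain lane-wise left shifts by an immediate, lowered as `simd_shl` against a splatted `N`; `static_assert_uimm_bits!(N, 3)` pins the immediate to 3 unsigned bits, i.e. `0..=7` for 8-bit lanes. A scalar sketch of the same semantics (illustrative names, not part of the diff):

```rust
// Scalar model of the removed vshl_n_s8: out[i] = a[i] << N, N in 0..=7.
fn vshl_n_s8_model<const N: i32>(a: [i8; 8]) -> [i8; 8] {
    assert!(N >= 0 && N < 8, "N must fit in 3 unsigned bits");
    let mut out = [0i8; 8];
    for i in 0..8 {
        out[i] = a[i].wrapping_shl(N as u32);
    }
    out
}

fn main() {
    assert_eq!(vshl_n_s8_model::<2>([1; 8]), [4; 8]);
}
```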
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
-    static_assert_uimm_bits!(N, 5);
-    unsafe { simd_shl(a, vdupq_n_u32(N as _)) }
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))]
+pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
+    vst1q_v8f16(
+        ptr as *const i8,
+        transmute(a),
+        crate::mem::align_of::<f16>() as i32,
+    )
 }
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u64)"]
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
-    static_assert_uimm_bits!(N, 6);
-    unsafe { simd_shl(a, vdup_n_u64(N as _)) }
+#[cfg_attr(test, assert_instr(vst1))]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vst1_f16_x2(a: *mut f16, b: float16x4x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0.v4f16")]
+        fn _vst1_f16_x2(ptr: *mut f16, a: float16x4_t, b: float16x4_t);
+    }
+    _vst1_f16_x2(a, b.0, b.1)
 }
-#[doc = "Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)"]
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shl, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
-    static_assert_uimm_bits!(N, 6);
-    unsafe { simd_shl(a, vdupq_n_u64(N as _)) }
+#[cfg_attr(test, assert_instr(vst1))]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vst1q_f16_x2(a: *mut f16, b: float16x8x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0.v8f16")]
+        fn _vst1q_f16_x2(ptr: *mut f16, a: float16x8_t, b: float16x8_t);
+    }
+    _vst1q_f16_x2(a, b.0, b.1)
 }
-#[doc = "Signed Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)"]
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sshl)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
+#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(test, assert_instr(st1))]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vst1_f16_x2(a: *mut f16, b: float16x4x2_t) {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")]
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sshl.v8i8"
+            link_name = "llvm.aarch64.neon.st1x2.v4f16.p0"
         )]
-        fn _vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t;
+        fn _vst1_f16_x2(a: float16x4_t, b: float16x4_t, ptr: *mut f16);
     }
-    unsafe { _vshl_s8(a, b) }
+    _vst1_f16_x2(b.0, b.1, a)
 }
-#[doc = "Signed Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)"]
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sshl)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
+#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(test, assert_instr(st1))]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vst1q_f16_x2(a: *mut f16, b: float16x8x2_t) {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")]
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sshl.v16i8"
+            link_name = "llvm.aarch64.neon.st1x2.v8f16.p0"
         )]
-        fn _vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
+        fn _vst1q_f16_x2(a: float16x8_t, b: float16x8_t, ptr: *mut f16);
     }
-    unsafe { _vshlq_s8(a, b) }
+    _vst1q_f16_x2(b.0, b.1, a)
 }
-#[doc = "Signed Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)"]
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sshl)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
+#[cfg_attr(test, assert_instr(vst1))]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vst1_f16_x3(a: *mut f16, b: float16x4x3_t) {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")]
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.sshl.v4i16"
-        )]
-        fn _vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t;
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v4f16")]
+        fn _vst1_f16_x3(ptr: *mut f16, a: float16x4_t, b: float16x4_t, c: float16x4_t);
     }
-    unsafe { _vshl_s16(a, b) }
+    _vst1_f16_x3(a, b.0, b.1, b.2)
 }
-#[doc = "Signed Shift left"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)"]
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sshl)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
+#[cfg_attr(test, assert_instr(vst1))]
+#[target_feature(enable = "neon,fp16")]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vst1q_f16_x3(a: *mut f16, b: float16x8x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v8f16")]
+        fn _vst1q_f16_x3(ptr: *mut f16, a: float16x8_t, b: float16x8_t, c: float16x8_t);
+    }
+    _vst1q_f16_x3(a, b.0, b.1, b.2)
+}
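The `vst1*_f16_x2/_x3/_x4` additions store a tuple of registers to consecutive memory; note that the argument order flips between the Arm LLVM intrinsics (pointer first) and the AArch64 `st1x*` intrinsics below (pointer last). A sketch of the resulting memory layout, modeled with `f32` and safe slices since `f16` is still unstable (illustrative names, not part of the diff):

```rust
// Layout model for an _x2 store: the two registers land back to back.
fn vst1_x2_model(ptr: &mut [f32; 8], b: ([f32; 4], [f32; 4])) {
    ptr[..4].copy_from_slice(&b.0); // first register at ptr[0..4]
    ptr[4..].copy_from_slice(&b.1); // second register immediately after
}

fn main() {
    let mut buf = [0.0f32; 8];
    vst1_x2_model(&mut buf, ([1.0; 4], [2.0; 4]));
    assert_eq!(buf, [1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0]);
}
```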
"llvm.aarch64.neon.sshl.v8i16" + link_name = "llvm.aarch64.neon.st1x3.v4f16.p0" )] - fn _vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; + fn _vst1_f16_x3(a: float16x4_t, b: float16x4_t, c: float16x4_t, ptr: *mut f16); } - unsafe { _vshlq_s16(a, b) } + _vst1_f16_x3(b.0, b.1, b.2, a) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vst1q_f16_x3(a: *mut f16, b: float16x8x3_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v2i32" + link_name = "llvm.aarch64.neon.st1x3.v8f16.p0" )] - fn _vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; + fn _vst1q_f16_x3(a: float16x8_t, b: float16x8_t, c: float16x8_t, ptr: *mut f16); } - unsafe { _vshl_s32(a, b) } + _vst1q_f16_x3(b.0, b.1, b.2, a) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_f16_x4(a: *mut f16, b: float16x4x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v4i32" - )] - fn _vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; + 
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v4f16")] + fn _vst1_f16_x4( + ptr: *mut f16, + a: float16x4_t, + b: float16x4_t, + c: float16x4_t, + d: float16x4_t, + ); } - unsafe { _vshlq_s32(a, b) } + _vst1_f16_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_f16_x4(a: *mut f16, b: float16x8x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v1i64" - )] - fn _vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v8f16")] + fn _vst1q_f16_x4( + ptr: *mut f16, + a: float16x8_t, + b: float16x8_t, + c: float16x8_t, + d: float16x8_t, + ); } - unsafe { _vshl_s64(a, b) } + _vst1q_f16_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Signed Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vst1_f16_x4(a: *mut f16, b: float16x4x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.sshl.v2i64" + link_name = 
"llvm.aarch64.neon.st1x4.v4f16.p0" )] - fn _vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; + fn _vst1_f16_x4( + a: float16x4_t, + b: float16x4_t, + c: float16x4_t, + d: float16x4_t, + ptr: *mut f16, + ); } - unsafe { _vshlq_s64(a, b) } + _vst1_f16_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(test, assert_instr(st1))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vst1q_f16_x4(a: *mut f16, b: float16x8x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")] #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v8i8" + link_name = "llvm.aarch64.neon.st1x4.v8f16.p0" )] - fn _vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; + fn _vst1q_f16_x4( + a: float16x8_t, + b: float16x8_t, + c: float16x8_t, + d: float16x8_t, + ptr: *mut f16, + ); } - unsafe { _vshl_u8(a, b) } + _vst1q_f16_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v16i8" - )] - fn _vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; - } - unsafe { _vshlq_u8(a, b) } +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { + const ALIGN: i32 
= crate::mem::align_of::() as i32; + vst1_v2f32::(ptr as *const i8, transmute(a)) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v4i16" - )] - fn _vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; - } - unsafe { _vshl_u16(a, b) } +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1q_v4f32::(ptr as *const i8, transmute(a)) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v8i16" - )] - fn _vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; - } - unsafe { _vshlq_u16(a, b) } +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1_v8i8::(ptr as *const i8, a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v2i32" - )] - fn _vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; - } - unsafe { _vshl_u32(a, b) } +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1q_v16i8::(ptr as *const i8, a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v4i32" - )] - fn _vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; - } - unsafe { _vshlq_u32(a, b) } +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1_v4i16::(ptr as *const i8, a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u64)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v1i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v1i64" - )] - fn _vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; - } - unsafe { _vshl_u64(a, b) } +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1q_v8i16::(ptr as *const i8, a) } -#[doc = "Unsigned Shift left"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushl) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")] - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.ushl.v2i64" - )] - fn _vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; - } - unsafe { _vshlq_u64(a, b) } +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1_v2i32::(ptr as *const i8, a) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshll, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshll_n_s16(a: int16x4_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 16); - unsafe { simd_shl(simd_cast(a), vdupq_n_s32(N as _)) } +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] +pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1q_v4i32::(ptr as *const i8, a) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshll, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshll_n_s32(a: int32x2_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 32); - unsafe { simd_shl(simd_cast(a), vdupq_n_s64(N as _)) } +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1_v1i64::(ptr as *const i8, a) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshll, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshll_n_s8(a: int8x8_t) -> int16x8_t { - static_assert!(N >= 0 && N <= 8); - unsafe { simd_shl(simd_cast(a), vdupq_n_s16(N as _)) } +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] +pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1q_v2i64::(ptr as *const i8, a) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)"] +#[doc = "Store multiple single-element structures 
from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushll, N = 2) -)] -#[rustc_legacy_const_generics(1)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { - static_assert!(N >= 0 && N <= 16); - unsafe { simd_shl(simd_cast(a), vdupq_n_u32(N as _)) } +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1_v8i8::(ptr as *const i8, transmute(a)) } -#[doc = "Signed shift left long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] +pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1q_v16i8::(ptr as *const i8, transmute(a)) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1_v4i16::(ptr as *const i8, transmute(a)) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { + const ALIGN: i32 = crate::mem::align_of::() as i32; + vst1q_v8i16::(ptr as 
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))]
+pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
+    const ALIGN: i32 = crate::mem::align_of::<u32>() as i32;
+    vst1_v2i32::<ALIGN>(ptr as *const i8, transmute(a))
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))]
+pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
+    const ALIGN: i32 = crate::mem::align_of::<u32>() as i32;
+    vst1q_v4i32::<ALIGN>(ptr as *const i8, transmute(a))
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))]
+pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
+    const ALIGN: i32 = crate::mem::align_of::<u64>() as i32;
+    vst1_v1i64::<ALIGN>(ptr as *const i8, transmute(a))
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))]
+pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
+    const ALIGN: i32 = crate::mem::align_of::<u64>() as i32;
+    vst1q_v2i64::<ALIGN>(ptr as *const i8, transmute(a))
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))]
+pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
+    const ALIGN: i32 = crate::mem::align_of::<p8>() as i32;
+    vst1_v8i8::<ALIGN>(ptr as *const i8, transmute(a))
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))]
+pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
+    const ALIGN: i32 = crate::mem::align_of::<p8>() as i32;
+    vst1q_v16i8::<ALIGN>(ptr as *const i8, transmute(a))
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))]
+pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
+    const ALIGN: i32 = crate::mem::align_of::<p16>() as i32;
+    vst1_v4i16::<ALIGN>(ptr as *const i8, transmute(a))
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))]
+pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
+    const ALIGN: i32 = crate::mem::align_of::<p16>() as i32;
+    vst1q_v8i16::<ALIGN>(ptr as *const i8, transmute(a))
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))]
+pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
+    const ALIGN: i32 = crate::mem::align_of::<p64>() as i32;
+    vst1_v1i64::<ALIGN>(ptr as *const i8, transmute(a))
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(target_arch = "arm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))]
+pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
+    const ALIGN: i32 = crate::mem::align_of::<p64>() as i32;
+    vst1q_v2i64::<ALIGN>(ptr as *const i8, transmute(a))
+}
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[cfg_attr(test, assert_instr(vst1))]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2f32.p0")]
+        fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t);
+    }
+    _vst1_f32_x2(a, b.0, b.1)
+}
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[cfg_attr(test, assert_instr(vst1))]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4f32.p0")]
+        fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t);
+    }
+    _vst1q_f32_x2(a, b.0, b.1)
+}
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(test, assert_instr(st1))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x2.v2f32.p0"
+        )]
+        fn _vst1_f32_x2(a: float32x2_t, b: float32x2_t, ptr: *mut f32);
+    }
+    _vst1_f32_x2(b.0, b.1, a)
+}
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(test, assert_instr(st1))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x2.v4f32.p0"
+        )]
+        fn _vst1q_f32_x2(a: float32x4_t, b: float32x4_t, ptr: *mut f32);
+    }
+    _vst1q_f32_x2(b.0, b.1, a)
+}
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(test, assert_instr(st1))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x3.v2f32.p0"
+        )]
+        fn _vst1_f32_x3(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut f32);
+    }
+    _vst1_f32_x3(b.0, b.1, b.2, a)
+}
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(test, assert_instr(st1))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x3.v4f32.p0"
+        )]
+        fn _vst1q_f32_x3(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut f32);
+    }
+    _vst1q_f32_x3(b.0, b.1, b.2, a)
+}
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v2f32.p0")]
+        fn _vst1_f32_x4(
+            ptr: *mut f32,
+            a: float32x2_t,
+            b: float32x2_t,
+            c: float32x2_t,
+            d: float32x2_t,
+        );
+    }
+    _vst1_f32_x4(a, b.0, b.1, b.2, b.3)
+}
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v4f32.p0")]
+        fn _vst1q_f32_x4(
+            ptr: *mut f32,
+            a: float32x4_t,
+            b: float32x4_t,
+            c: float32x4_t,
+            d: float32x4_t,
+        );
+    }
+    _vst1q_f32_x4(a, b.0, b.1, b.2, b.3)
+}
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(test, assert_instr(st1))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x4.v2f32.p0"
+        )]
+        fn _vst1_f32_x4(
+            a: float32x2_t,
+            b: float32x2_t,
+            c: float32x2_t,
+            d: float32x2_t,
+            ptr: *mut f32,
+        );
+    }
+    _vst1_f32_x4(b.0, b.1, b.2, b.3, a)
+}
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[cfg_attr(test, assert_instr(st1))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x4.v4f32.p0"
+        )]
+        fn _vst1q_f32_x4(
+            a: float32x4_t,
+            b: float32x4_t,
+            c: float32x4_t,
+            d: float32x4_t,
+            ptr: *mut f32,
+        );
+    }
+    _vst1q_f32_x4(b.0, b.1, b.2, b.3, a)
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ushll, N = 2)
-)]
-#[rustc_legacy_const_generics(1)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    assert_instr(nop, LANE = 0)
 )]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vst1_lane_f16<const LANE: i32>(a: *mut f16, b: float16x4_t) {
+    static_assert_uimm_bits!(LANE, 2);
+    *a = simd_extract!(b, LANE as u32);
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(nop, LANE = 0)
 )]
-pub fn vshll_n_u32<const N: i32>(a: uint32x2_t) -> uint64x2_t {
-    static_assert!(N >= 0 && N <= 32);
-    unsafe { simd_shl(simd_cast(a), vdupq_n_u64(N as _)) }
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+pub unsafe fn vst1q_lane_f16<const LANE: i32>(a: *mut f16, b: float16x8_t) {
+    static_assert_uimm_bits!(LANE, 3);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Signed shift left long"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ushll, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -56875,21 +58450,23 @@ pub fn vshll_n_u32<const N: i32>(a: uint32x2_t) -> uint64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshll_n_u8<const N: i32>(a: uint8x8_t) -> uint16x8_t {
-    static_assert!(N >= 0 && N <= 8);
-    unsafe { simd_shl(simd_cast(a), vdupq_n_u16(N as _)) }
+pub unsafe fn vst1_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2_t) {
+    static_assert_uimm_bits!(LANE, 1);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sshr, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -56898,22 +58475,23 @@ pub fn vshll_n_u8<const N: i32>(a: uint8x8_t) -> uint16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
-    static_assert!(N >= 1 && N <= 8);
-    let n: i32 = if N == 8 { 7 } else { N };
-    unsafe { simd_shr(a, vdup_n_s8(n as _)) }
+pub unsafe fn vst1q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4_t) {
+    static_assert_uimm_bits!(LANE, 2);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sshr, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -56922,22 +58500,23 @@ pub fn vshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
     target_arch = "arm",
    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
-    static_assert!(N >= 1 && N <= 8);
-    let n: i32 = if N == 8 { 7 } else { N };
-    unsafe { simd_shr(a, vdupq_n_s8(n as _)) }
+pub unsafe fn vst1_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8_t) {
+    static_assert_uimm_bits!(LANE, 3);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sshr, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -56946,22 +58525,23 @@ pub fn vshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
-    static_assert!(N >= 1 && N <= 16);
-    let n: i32 = if N == 16 { 15 } else { N };
-    unsafe { simd_shr(a, vdup_n_s16(n as _)) }
+pub unsafe fn vst1q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16_t) {
+    static_assert_uimm_bits!(LANE, 4);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sshr, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -56970,22 +58550,23 @@ pub fn vshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
-    static_assert!(N >= 1 && N <= 16);
-    let n: i32 = if N == 16 { 15 } else { N };
-    unsafe { simd_shr(a, vdupq_n_s16(n as _)) }
+pub unsafe fn vst1_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4_t) {
+    static_assert_uimm_bits!(LANE, 2);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sshr, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -56994,22 +58575,23 @@ pub fn vshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
-    static_assert!(N >= 1 && N <= 32);
-    let n: i32 = if N == 32 { 31 } else { N };
-    unsafe { simd_shr(a, vdup_n_s32(n as _)) }
+pub unsafe fn vst1q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8_t) {
+    static_assert_uimm_bits!(LANE, 3);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sshr, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -57018,22 +58600,23 @@ pub fn vshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
-    static_assert!(N >= 1 && N <= 32);
-    let n: i32 = if N == 32 { 31 } else { N };
-    unsafe { simd_shr(a, vdupq_n_s32(n as _)) }
+pub unsafe fn vst1_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2_t) {
+    static_assert_uimm_bits!(LANE, 1);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s64)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sshr, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -57042,22 +58625,23 @@ pub fn vshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
static_assert!(N >= 1 && N <= 64); - let n: i32 = if N == 64 { 63 } else { N }; - unsafe { simd_shr(a, vdup_n_s64(n as _)) } +pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sshr, N = 2) + assert_instr(nop, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -57066,22 +58650,23 @@ pub fn vshr_n_s64(a: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - let n: i32 = if N == 64 { 63 } else { N }; - unsafe { simd_shr(a, vdupq_n_s64(n as _)) } +pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -57090,26 +58675,23 @@ pub fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - let n: i32 = if N == 8 { - return vdup_n_u8(0); - } else { - N - }; - unsafe { simd_shr(a, vdup_n_u8(n as _)) } +pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)"] +#[doc = "## Safety"] +#[doc = " * 
Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -57118,26 +58700,23 @@ pub fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - let n: i32 = if N == 8 { - return vdupq_n_u8(0); - } else { - N - }; - unsafe { simd_shr(a, vdupq_n_u8(n as _)) } +pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { + static_assert_uimm_bits!(LANE, 4); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -57146,26 +58725,23 @@ pub fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { - static_assert!(N >= 1 && N <= 16); - let n: i32 = if N == 16 { - return vdup_n_u16(0); - } else { - N - }; - unsafe { simd_shr(a, vdup_n_u16(n as _)) } +pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -57174,26 
+58750,23 @@ pub fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { - static_assert!(N >= 1 && N <= 16); - let n: i32 = if N == 16 { - return vdupq_n_u16(0); - } else { - N - }; - unsafe { simd_shr(a, vdupq_n_u16(n as _)) } +pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { + static_assert_uimm_bits!(LANE, 3); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -57202,26 +58775,23 @@ pub fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { - static_assert!(N >= 1 && N <= 32); - let n: i32 = if N == 32 { - return vdup_n_u32(0); - } else { - N - }; - unsafe { simd_shr(a, vdup_n_u32(n as _)) } +pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { + static_assert_uimm_bits!(LANE, 1); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Shift right"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ushr, N = 2) + assert_instr(nop, LANE = 0) )] -#[rustc_legacy_const_generics(1)] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -57230,26 +58800,23 @@ pub fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { - static_assert!(N >= 1 && N <= 32); - let n: i32 = if N == 32 { - return vdupq_n_u32(0); - } else { - N - }; - unsafe { simd_shr(a, vdupq_n_u32(n as _)) } +pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { + static_assert_uimm_bits!(LANE, 2); + *a = simd_extract!(b, LANE as u32); } -#[doc = "Shift right"] -#[doc = "[Arm's 
-#[doc = "Shift right"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u64)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ushr, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -57258,26 +58825,23 @@ pub fn vshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
-    static_assert!(N >= 1 && N <= 64);
-    let n: i32 = if N == 64 {
-        return vdup_n_u64(0);
-    } else {
-        N
-    };
-    unsafe { simd_shr(a, vdup_n_u64(n as _)) }
+pub unsafe fn vst1q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2_t) {
+    static_assert_uimm_bits!(LANE, 1);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(ushr, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -57286,26 +58850,23 @@ pub fn vshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
-    static_assert!(N >= 1 && N <= 64);
-    let n: i32 = if N == 64 {
-        return vdupq_n_u64(0);
-    } else {
-        N
-    };
-    unsafe { simd_shr(a, vdupq_n_u64(n as _)) }
+pub unsafe fn vst1_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8_t) {
+    static_assert_uimm_bits!(LANE, 3);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shrn, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -57314,21 +58875,23 @@ pub fn vshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
-    static_assert!(N >= 1 && N <= 8);
-    unsafe { simd_cast(simd_shr(a, vdupq_n_s16(N as _))) }
+pub unsafe fn vst1q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16_t) {
+    static_assert_uimm_bits!(LANE, 4);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shrn, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -57337,21 +58900,23 @@ pub fn vshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
-    static_assert!(N >= 1 && N <= 16);
-    unsafe { simd_cast(simd_shr(a, vdupq_n_s32(N as _))) }
+pub unsafe fn vst1_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4_t) {
+    static_assert_uimm_bits!(LANE, 2);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shrn, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -57360,21 +58925,23 @@ pub fn vshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
-    static_assert!(N >= 1 && N <= 32);
-    unsafe { simd_cast(simd_shr(a, vdupq_n_s64(N as _))) }
+pub unsafe fn vst1q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8_t) {
+    static_assert_uimm_bits!(LANE, 3);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p64)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shrn, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -57383,21 +58950,23 @@ pub fn vshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
-    static_assert!(N >= 1 && N <= 8);
-    unsafe { simd_cast(simd_shr(a, vdupq_n_u16(N as _))) }
+pub unsafe fn vst1_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1_t) {
+    static_assert!(LANE == 0);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s64)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shrn, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -57406,21 +58975,23 @@ pub fn vshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
    static_assert!(N >= 1 && N <= 16);
-    unsafe { simd_cast(simd_shr(a, vdupq_n_u32(N as _))) }
+pub unsafe fn vst1_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1_t) {
+    static_assert!(LANE == 0);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift right narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u64)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(shrn, N = 2)
+    assert_instr(nop, LANE = 0)
 )]
-#[rustc_legacy_const_generics(1)]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(
     not(target_arch = "arm"),
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -57429,1739 +59000,1437 @@ pub fn vshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
-    static_assert!(N >= 1 && N <= 32);
-    unsafe { simd_cast(simd_shr(a, vdupq_n_u64(N as _))) }
+pub unsafe fn vst1_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1_t) {
+    static_assert!(LANE == 0);
+    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
+#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
-    static_assert_uimm_bits!(N, 3);
-    vshiftlins_v8i8::<N>(a, b)
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(st1)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) {
+    vst1_s64_x2(transmute(a), transmute(b))
 }
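// --- Editorial note, not part of the patch: the p64 store above does no
// polynomial-specific work; it reinterprets the pointer and register types
// with `transmute` and defers to the s64 variant, so the bytes written are
// identical. A hedged sketch (assumes aarch64; p64 intrinsics are gated on
// the `aes` target feature):
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon,aes")]
unsafe fn store_poly64_pair(out: *mut u64) {
    use core::arch::aarch64::*;
    // Build a 1-lane poly64 vector by bit reinterpretation to keep the sketch
    // independent of any particular constructor intrinsic.
    let v: poly64x1_t = core::mem::transmute(3u64);
    vst1_p64_x2(out as *mut p64, poly64x1x2_t(v, v));
}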
"stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { + vst1_s64_x3(transmute(a), transmute(b)) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert_uimm_bits!(N, 4); - vshiftlins_v4i16::(a, b) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { + vst1_s64_x4(transmute(a), transmute(b)) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert_uimm_bits!(N, 4); - vshiftlins_v8i16::(a, b) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { + vst1q_s64_x2(transmute(a), transmute(b)) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 0 && N <= 31); - vshiftlins_v2i32::(a, b) +#[doc = "Store multiple single-element 
structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { + vst1q_s64_x3(transmute(a), transmute(b)) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 0 && N <= 31); - vshiftlins_v4i32::(a, b) +#[target_feature(enable = "neon,aes")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { + vst1q_s64_x4(transmute(a), transmute(b)) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 0 && N <= 63); - vshiftlins_v1i64::(a, b) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v8i8.p0" + )] + fn _vst1_s8_x2(a: int8x8_t, b: int8x8_t, ptr: *mut i8); + } + _vst1_s8_x2(b.0, b.1, a) } -#[doc = "Shift Left and Insert 
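// --- Editorial note, not part of the patch: the binding pattern above links
// straight to an LLVM intrinsic by name inside `unsafe extern "unadjusted"`.
// The aarch64 `llvm.aarch64.neon.st1x2.*` intrinsics take the data operands
// first and the destination pointer last, hence the `_vst1_s8_x2(b.0, b.1, a)`
// call order. A hedged caller-side sketch (assumes aarch64):
#[cfg(target_arch = "aarch64")]
fn store_two_d_registers(dst: &mut [i8; 16]) {
    use core::arch::aarch64::*;
    unsafe {
        let lo = vdup_n_s8(1);
        let hi = vdup_n_s8(2);
        // Writes 16 contiguous bytes: eight 1s, then eight 2s.
        vst1_s8_x2(dst.as_mut_ptr(), int8x8x2_t(lo, hi));
    }
}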
(immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 0 && N <= 63); - vshiftlins_v2i64::(a, b) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v16i8.p0" + )] + fn _vst1q_s8_x2(a: int8x16_t, b: int8x16_t, ptr: *mut i8); + } + _vst1q_s8_x2(b.0, b.1, a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert_uimm_bits!(N, 3); - unsafe { transmute(vshiftlins_v8i8::(transmute(a), transmute(b))) } +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v4i16.p0" + )] + fn _vst1_s16_x2(a: int16x4_t, b: int16x4_t, ptr: *mut i16); + } + _vst1_s16_x2(b.0, b.1, a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert_uimm_bits!(N, 3); - unsafe { transmute(vshiftlins_v16i8::(transmute(a), transmute(b))) } +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", 
since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v8i16.p0" + )] + fn _vst1q_s16_x2(a: int16x8_t, b: int16x8_t, ptr: *mut i16); + } + _vst1q_s16_x2(b.0, b.1, a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert_uimm_bits!(N, 4); - unsafe { transmute(vshiftlins_v4i16::(transmute(a), transmute(b))) } +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v2i32.p0" + )] + fn _vst1_s32_x2(a: int32x2_t, b: int32x2_t, ptr: *mut i32); + } + _vst1_s32_x2(b.0, b.1, a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert_uimm_bits!(N, 4); - unsafe { transmute(vshiftlins_v8i16::(transmute(a), transmute(b))) } +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x2.v4i32.p0" + )] + fn _vst1q_s32_x2(a: int32x4_t, b: int32x4_t, ptr: *mut i32); + } + _vst1q_s32_x2(b.0, b.1, a) } -#[doc = "Shift Left and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] 
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(st1))]
+pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x2.v1i64.p0"
+        )]
+        fn _vst1_s64_x2(a: int64x1_t, b: int64x1_t, ptr: *mut i64);
+    }
+    _vst1_s64_x2(b.0, b.1, a)
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(st1))]
+pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x2.v2i64.p0"
+        )]
+        fn _vst1q_s64_x2(a: int64x2_t, b: int64x2_t, ptr: *mut i64);
+    }
+    _vst1q_s64_x2(b.0, b.1, a)
+}
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
-    static_assert!(N >= 0 && N <= 31);
-    unsafe { transmute(vshiftlins_v2i32::<N>(transmute(a), transmute(b))) }
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i8.p0")]
+        fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t);
+    }
+    _vst1_s8_x2(a, b.0, b.1)
 }
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.32", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
-    static_assert!(N >= 0 && N <= 31);
-    unsafe { transmute(vshiftlins_v4i32::<N>(transmute(a), transmute(b))) }
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v16i8.p0")]
+        fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t);
+    }
+    _vst1q_s8_x2(a, b.0, b.1)
 }
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
-    static_assert!(N >= 0 && N <= 63);
-    unsafe { transmute(vshiftlins_v1i64::<N>(transmute(a), transmute(b))) }
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i16.p0")]
+        fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t);
+    }
+    _vst1_s16_x2(a, b.0, b.1)
 }
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.64", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
-    static_assert!(N >= 0 && N <= 63);
-    unsafe { transmute(vshiftlins_v2i64::<N>(transmute(a), transmute(b))) }
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i16.p0")]
+        fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t);
+    }
+    _vst1q_s16_x2(a, b.0, b.1)
 }
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { transmute(vshiftlins_v8i8::<N>(transmute(a), transmute(b))) }
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i32.p0")]
+        fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t);
+    }
+    _vst1_s32_x2(a, b.0, b.1)
 }
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.8", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
-    static_assert_uimm_bits!(N, 3);
-    unsafe { transmute(vshiftlins_v16i8::<N>(transmute(a), transmute(b))) }
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i32.p0")]
+        fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t);
+    }
+    _vst1q_s32_x2(a, b.0, b.1)
 }
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe { transmute(vshiftlins_v4i16::<N>(transmute(a), transmute(b))) }
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v1i64.p0")]
+        fn _vst1_s64_x2(ptr: *mut i64, a: int64x1_t, b: int64x1_t);
+    }
+    _vst1_s64_x2(a, b.0, b.1)
 }
-#[doc = "Shift Left and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
+#[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsli.16", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
-    static_assert_uimm_bits!(N, 4);
-    unsafe { transmute(vshiftlins_v8i16::<N>(transmute(a), transmute(b))) }
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i64.p0")]
+        fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t);
+    }
+    _vst1q_s64_x2(a, b.0, b.1)
 }
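// --- Editorial note, not part of the patch: from this point the same public
// names are defined a second time for 32-bit Arm, gated on
// #[cfg(target_arch = "arm")] plus the `neon,v7` target feature, and kept
// unstable. The LLVM bindings also differ in argument order: the Arm
// `llvm.arm.neon.vst1x2.*` shims take the pointer first
// (`_vst1_s8_x2(a, b.0, b.1)`), while the aarch64 `st1x2` shims take it last
// (`_vst1_s8_x2(b.0, b.1, a)`). Only one definition is compiled for any given
// target, so callers always see a single `vst1_s8_x2`; a hedged illustration
// of that dispatch (the `core::arch::arm` module is nightly-only):
#[cfg(target_arch = "arm")]
use core::arch::arm::vst1_s8_x2;
#[cfg(target_arch = "aarch64")]
use core::arch::aarch64::vst1_s8_x2;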
accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe { simd_add(a, vshr_n_s8::(b)) } +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v8i8.p0" + )] + fn _vst1_s8_x3(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); + } + _vst1_s8_x3(b.0, b.1, b.2, a) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { - static_assert!(N >= 1 && N <= 8); - unsafe { simd_add(a, vshrq_n_s8::(b)) } +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x3.v16i8.p0" + )] + fn _vst1q_s8_x3(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); + } + _vst1q_s8_x3(b.0, b.1, b.2, a) } -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] 
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - static_assert!(N >= 1 && N <= 16); - unsafe { simd_add(a, vshr_n_s16::(b)) } -} -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - static_assert!(N >= 1 && N <= 16); - unsafe { simd_add(a, vshrq_n_s16::(b)) } -} -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - static_assert!(N >= 1 && N <= 32); - unsafe { simd_add(a, vshr_n_s32::(b)) } -} -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(N >= 1 && N <= 32); - unsafe { simd_add(a, vshrq_n_s32::(b)) } -} -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] -#[cfg_attr( - 
all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(N >= 1 && N <= 64); - unsafe { simd_add(a, vshr_n_s64::(b)) } -} -#[doc = "Signed shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssra, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(N >= 1 && N <= 64); - unsafe { simd_add(a, vshrq_n_s64::(b)) } -} -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(N >= 1 && N <= 8); - unsafe { simd_add(a, vshr_n_u8::(b)) } -} -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(N >= 1 && N <= 8); - unsafe { simd_add(a, vshrq_n_u8::(b)) } -} -#[doc = "Unsigned shift right and accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usra, N = 2) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - 
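// --- Editorial note, not part of the patch: the long removal block above
// drops the old inline vsra_n_* (shift right and accumulate) definitions from
// this spot; the hunk header earlier (`@@ -57429,1739 +59000,1437 @@`) shows a
// large regenerated region, which suggests the family is laid out elsewhere
// in the new file rather than removed from the API. Per lane the operation is
// just "a + (b >> N)"; a hedged scalar model of vsra_n_u8::<N> semantics:
fn vsra_u8_lane_model(a: u8, b: u8, n: u32) -> u8 {
    assert!((1..=8).contains(&n), "mirrors static_assert!(N >= 1 && N <= 8)");
    // N == 8 is special-cased to zero, as the removed vshr_n_u8 did with
    // vdup_n_u8(0); simd_add wraps, so wrapping_add matches.
    a.wrapping_add(if n == 8 { 0 } else { b >> n })
}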
-#[doc = "Unsigned shift right and accumulate"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(usra, N = 2)
-)]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
-    static_assert!(N >= 1 && N <= 16);
-    unsafe { simd_add(a, vshr_n_u16::<N>(b)) }
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(st1))]
+pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x3.v4i16.p0"
+        )]
+        fn _vst1_s16_x3(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i16);
+    }
+    _vst1_s16_x3(b.0, b.1, b.2, a)
 }
-#[doc = "Unsigned shift right and accumulate"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(usra, N = 2)
-)]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
-    static_assert!(N >= 1 && N <= 16);
-    unsafe { simd_add(a, vshrq_n_u16::<N>(b)) }
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(st1))]
+pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x3.v8i16.p0"
+        )]
+        fn _vst1q_s16_x3(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i16);
+    }
+    _vst1q_s16_x3(b.0, b.1, b.2, a)
 }
-#[doc = "Unsigned shift right and accumulate"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(usra, N = 2)
-)]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
-    static_assert!(N >= 1 && N <= 32);
-    unsafe { simd_add(a, vshr_n_u32::<N>(b)) }
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(st1))]
+pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x3.v2i32.p0"
+        )]
+        fn _vst1_s32_x3(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i32);
+    }
+    _vst1_s32_x3(b.0, b.1, b.2, a)
 }
-#[doc = "Unsigned shift right and accumulate"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(usra, N = 2)
-)]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
-    static_assert!(N >= 1 && N <= 32);
-    unsafe { simd_add(a, vshrq_n_u32::<N>(b)) }
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(st1))]
+pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x3.v4i32.p0"
+        )]
+        fn _vst1q_s32_x3(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i32);
+    }
+    _vst1q_s32_x3(b.0, b.1, b.2, a)
 }
-#[doc = "Unsigned shift right and accumulate"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u64)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(usra, N = 2)
-)]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
-    static_assert!(N >= 1 && N <= 64);
-    unsafe { simd_add(a, vshr_n_u64::<N>(b)) }
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(st1))]
+pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x3.v1i64.p0"
+        )]
+        fn _vst1_s64_x3(a: int64x1_t, b: int64x1_t, c: int64x1_t, ptr: *mut i64);
+    }
+    _vst1_s64_x3(b.0, b.1, b.2, a)
 }
-#[doc = "Unsigned shift right and accumulate"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
-#[cfg_attr(
-    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(usra, N = 2)
-)]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
-)]
-pub fn vsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
-    static_assert!(N >= 1 && N <= 64);
-    unsafe { simd_add(a, vshrq_n_u64::<N>(b)) }
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(st1))]
+pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st1x3.v2i64.p0"
+        )]
+        fn _vst1q_s64_x3(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i64);
+    }
+    _vst1q_s64_x3(b.0, b.1, b.2, a)
 }
-#[doc = "Shift Right and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
-    static_assert!(1 <= N && N <= 8);
-    vshiftrins_v8i8::<N>(a, b)
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v8i8.p0")]
+        fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t);
+    }
+    _vst1_s8_x3(a, b.0, b.1, b.2)
 }
-#[doc = "Shift Right and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
-    static_assert!(1 <= N && N <= 8);
-    vshiftrins_v16i8::<N>(a, b)
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v16i8.p0")]
+        fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t);
+    }
+    _vst1q_s8_x3(a, b.0, b.1, b.2)
 }
-#[doc = "Shift Right and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
-    static_assert!(1 <= N && N <= 16);
-    vshiftrins_v4i16::<N>(a, b)
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v4i16.p0")]
+        fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t);
+    }
+    _vst1_s16_x3(a, b.0, b.1, b.2)
 }
-#[doc = "Shift Right and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
-    static_assert!(1 <= N && N <= 16);
-    vshiftrins_v8i16::<N>(a, b)
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v8i16.p0")]
+        fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t);
+    }
+    _vst1q_s16_x3(a, b.0, b.1, b.2)
 }
-#[doc = "Shift Right and Insert (immediate)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
+#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))]
-#[rustc_legacy_const_generics(2)]
-pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
-    static_assert!(1 <= N && N <= 32);
-    vshiftrins_v2i32::<N>(a, b)
+#[cfg_attr(test, assert_instr(vst1))]
+pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name
= "llvm.arm.neon.vst1x3.p0.v2i32.p0")] + fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); + } + _vst1_s32_x3(a, b.0, b.1, b.2) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - static_assert!(1 <= N && N <= 32); - vshiftrins_v4i32::(a, b) +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v4i32.p0")] + fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); + } + _vst1q_s32_x3(a, b.0, b.1, b.2) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - static_assert!(1 <= N && N <= 64); - vshiftrins_v1i64::(a, b) +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v1i64.p0")] + fn _vst1_s64_x3(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t); + } + _vst1_s64_x3(a, b.0, b.1, b.2) } -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - static_assert!(1 <= N && N <= 64); - vshiftrins_v2i64::(a, b) -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue 
= "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - static_assert!(1 <= N && N <= 8); - unsafe { transmute(vshiftrins_v8i8::(transmute(a), transmute(b))) } -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { - static_assert!(1 <= N && N <= 8); - unsafe { transmute(vshiftrins_v16i8::(transmute(a), transmute(b))) } -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - static_assert!(1 <= N && N <= 16); - unsafe { transmute(vshiftrins_v4i16::(transmute(a), transmute(b))) } -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - static_assert!(1 <= N && N <= 16); - unsafe { transmute(vshiftrins_v8i16::(transmute(a), transmute(b))) } -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - static_assert!(1 <= N && N <= 32); - unsafe { transmute(vshiftrins_v2i32::(transmute(a), transmute(b))) } -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.32", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - static_assert!(1 <= N && N <= 32); - unsafe { transmute(vshiftrins_v4i32::(transmute(a), transmute(b))) } -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] 
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - static_assert!(1 <= N && N <= 64); - unsafe { transmute(vshiftrins_v1i64::(transmute(a), transmute(b))) } -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.64", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { - static_assert!(1 <= N && N <= 64); - unsafe { transmute(vshiftrins_v2i64::(transmute(a), transmute(b))) } -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { - static_assert!(1 <= N && N <= 8); - unsafe { transmute(vshiftrins_v8i8::(transmute(a), transmute(b))) } -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.8", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { - static_assert!(1 <= N && N <= 8); - unsafe { transmute(vshiftrins_v16i8::(transmute(a), transmute(b))) } -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { - static_assert!(1 <= N && N <= 16); - unsafe { transmute(vshiftrins_v4i16::(transmute(a), transmute(b))) } -} -#[doc = "Shift Right and Insert (immediate)"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsri.16", N = 1))] -#[rustc_legacy_const_generics(2)] -pub fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { - static_assert!(1 <= N && N <= 16); - unsafe { transmute(vshiftrins_v8i16::(transmute(a), transmute(b))) } -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) { - vst1_v4f16( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) { - vst1q_v8f16( - ptr as *const i8, - transmute(a), - crate::mem::align_of::() as i32, - ) +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v2i64.p0")] + fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); + } + _vst1q_s64_x3(a, b.0, b.1, b.2) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x2)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(vst1))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst1_f16_x2(a: *mut f16, b: float16x4x2_t) { +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0.v4f16")] - fn _vst1_f16_x2(ptr: *mut f16, a: float16x4_t, b: float16x4_t); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v8i8.p0" + )] + fn _vst1_s8_x4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8); } - _vst1_f16_x2(a, b.0, b.1) + _vst1_s8_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x2)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(vst1))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst1q_f16_x2(a: *mut f16, b: float16x8x2_t) { +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0.v8f16")] - fn _vst1q_f16_x2(ptr: *mut f16, a: float16x8_t, b: float16x8_t); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v16i8.p0" + )] + fn _vst1q_s8_x4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8); } - _vst1q_f16_x2(a, b.0, b.1) + _vst1q_s8_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x2)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] +#[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst1_f16_x2(a: *mut f16, b: float16x4x2_t) { +pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v4f16.p0" + link_name = "llvm.aarch64.neon.st1x4.v4i16.p0" )] - fn _vst1_f16_x2(a: float16x4_t, b: float16x4_t, ptr: *mut f16); + fn _vst1_s16_x4(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i16); } - _vst1_f16_x2(b.0, b.1, a) + _vst1_s16_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x2)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] +#[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst1q_f16_x2(a: *mut f16, b: float16x8x2_t) { +pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = 
"llvm.aarch64.neon.st1x2.v8f16.p0" + link_name = "llvm.aarch64.neon.st1x4.v8i16.p0" )] - fn _vst1q_f16_x2(a: float16x8_t, b: float16x8_t, ptr: *mut f16); + fn _vst1q_s16_x4(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i16); } - _vst1q_f16_x2(b.0, b.1, a) + _vst1q_s16_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x3)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(vst1))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst1_f16_x3(a: *mut f16, b: float16x4x3_t) { +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v4f16")] - fn _vst1_f16_x3(ptr: *mut f16, a: float16x4_t, b: float16x4_t, c: float16x4_t); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v2i32.p0" + )] + fn _vst1_s32_x4(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i32); } - _vst1_f16_x3(a, b.0, b.1, b.2) + _vst1_s32_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x3)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(vst1))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst1q_f16_x3(a: *mut f16, b: float16x8x3_t) { +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st1))] +pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v8f16")] - fn _vst1q_f16_x3(ptr: *mut f16, a: float16x8_t, b: float16x8_t, c: float16x8_t); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st1x4.v4i32.p0" + )] + fn _vst1q_s32_x4(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i32); } - _vst1q_f16_x3(a, b.0, b.1, b.2) + _vst1q_s32_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x3)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] +#[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst1_f16_x3(a: *mut f16, b: float16x4x3_t) { +pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4f16.p0" + link_name = "llvm.aarch64.neon.st1x4.v1i64.p0" )] - fn _vst1_f16_x3(a: float16x4_t, b: float16x4_t, c: float16x4_t, ptr: *mut f16); + fn _vst1_s64_x4(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, ptr: *mut i64); } - _vst1_f16_x3(b.0, b.1, b.2, a) + _vst1_s64_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x3)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] +#[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st1))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst1q_f16_x3(a: *mut f16, b: float16x8x3_t) { +pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v8f16.p0" + link_name = "llvm.aarch64.neon.st1x4.v2i64.p0" )] - fn _vst1q_f16_x3(a: float16x8_t, b: float16x8_t, c: float16x8_t, ptr: *mut f16); + fn _vst1q_s64_x4(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i64); } - _vst1q_f16_x3(b.0, b.1, b.2, a) + _vst1q_s64_x4(b.0, b.1, b.2, b.3, a) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x4)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_f16_x4(a: *mut 
f16, b: float16x4x4_t) { +pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v4f16")] - fn _vst1_f16_x4( - ptr: *mut f16, - a: float16x4_t, - b: float16x4_t, - c: float16x4_t, - d: float16x4_t, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v8i8.p0")] + fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t); } - _vst1_f16_x4(a, b.0, b.1, b.2, b.3) + _vst1_s8_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x4)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_f16_x4(a: *mut f16, b: float16x8x4_t) { +pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v8f16")] - fn _vst1q_f16_x4( - ptr: *mut f16, - a: float16x8_t, - b: float16x8_t, - c: float16x8_t, - d: float16x8_t, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v16i8.p0")] + fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t); } - _vst1q_f16_x4(a, b.0, b.1, b.2, b.3) + _vst1q_s8_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x4)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst1_f16_x4(a: *mut f16, b: float16x4x4_t) { +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v4f16.p0" - )] - fn _vst1_f16_x4( - a: float16x4_t, - b: float16x4_t, - c: float16x4_t, - d: float16x4_t, - ptr: *mut f16, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v4i16.p0")] + fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t); } - _vst1_f16_x4(b.0, b.1, b.2, b.3, a) + _vst1_s16_x4(a, 
b.0, b.1, b.2, b.3) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x4)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[target_feature(enable = "neon,fp16")] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst1q_f16_x4(a: *mut f16, b: float16x8x4_t) { +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v8f16.p0" - )] - fn _vst1q_f16_x4( - a: float16x8_t, - b: float16x8_t, - c: float16x8_t, - d: float16x8_t, - ptr: *mut f16, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v8i16.p0")] + fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t); } - _vst1q_f16_x4(b.0, b.1, b.2, b.3, a) + _vst1q_s16_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1_v2f32::(ptr as *const i8, transmute(a)) +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v2i32.p0")] + fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t); + } + _vst1_s32_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] 
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1q_v4f32::(ptr as *const i8, transmute(a)) +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v4i32.p0")] + fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t); + } + _vst1q_s32_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1_v8i8::(ptr as *const i8, a) +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v1i64.p0")] + fn _vst1_s64_x4(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t); + } + _vst1_s64_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1q_v16i8::(ptr as *const i8, a) +#[cfg_attr(test, assert_instr(vst1))] +pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v2i64.p0")] + fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); + } + _vst1q_s64_x4(a, b.0, b.1, b.2, b.3) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)"] #[doc = 
"## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1_v4i16::(ptr as *const i8, a) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { + vst1_s8_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1q_v8i16::(ptr as *const i8, a) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { + vst1_s8_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1_v2i32::(ptr as *const i8, a) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { + vst1_s8_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1q_v4i32::(ptr as *const i8, a) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { + vst1q_s8_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1_v1i64::(ptr as *const i8, a) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { + vst1q_s8_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1q_v2i64::(ptr as *const i8, a) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { + vst1q_s8_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1_v8i8::(ptr as *const i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { + vst1_s16_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1q_v16i8::(ptr as *const i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { + vst1_s16_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple 
single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1_v4i16::(ptr as *const i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { + vst1_s16_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1q_v8i16::(ptr as *const i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { + vst1q_s16_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1_u32(ptr: *mut u32, a: 
uint32x2_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1_v2i32::(ptr as *const i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { + vst1q_s16_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32"))] -pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1q_v4i32::(ptr as *const i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { + vst1q_s16_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1_v1i64::(ptr as *const i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { + vst1_s32_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1q_v2i64::(ptr as *const i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { + vst1_s32_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1_v8i8::(ptr as *const i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { + vst1_s32_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8"))] -pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { - const ALIGN: i32 = crate::mem::align_of::() as i32; - vst1q_v16i8::(ptr as *const 
i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { + vst1q_s32_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { - const ALIGN: i32 = crate::mem::align_of::<p16>() as i32; - vst1_v4i16::<ALIGN>(ptr as *const i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { + vst1q_s32_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { - const ALIGN: i32 = crate::mem::align_of::<p16>() as i32; - vst1q_v8i16::<ALIGN>(ptr as *const i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { + vst1q_s32_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"] +#[doc = "Store multiple single-element structures to
one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { - const ALIGN: i32 = crate::mem::align_of::<p64>() as i32; - vst1_v1i64::<ALIGN>(ptr as *const i8, transmute(a)) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st1) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { + vst1_s64_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64"))] -pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { - const ALIGN: i32 = crate::mem::align_of::<p64>() as i32; - vst1q_v2i64::<ALIGN>(ptr as *const i8, transmute(a)) -} -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2f32.p0")] - fn _vst1_f32_x2(ptr: *mut f32, a: float32x2_t, b: float32x2_t); - } - _vst1_f32_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst1))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4f32.p0")] - fn _vst1q_f32_x2(ptr: *mut f32, a: float32x4_t, b: float32x4_t); - } - _vst1q_f32_x2(a, b.0, b.1) -} -#[doc = "Store multiple single-element structures to
one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v2f32.p0" - )] - fn _vst1_f32_x2(a: float32x2_t, b: float32x2_t, ptr: *mut f32); - } - _vst1_f32_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x2.v4f32.p0" - )] - fn _vst1q_f32_x2(a: float32x4_t, b: float32x4_t, ptr: *mut f32); - } - _vst1q_f32_x2(b.0, b.1, a) -} -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2f32.p0" - )] - fn _vst1_f32_x3(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut f32); - } - _vst1_f32_x3(b.0, b.1, b.2, a) -} -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4f32.p0" - )] - fn _vst1q_f32_x3(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut f32); - } - _vst1q_f32_x3(b.0, b.1, b.2, a) -} -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - 
unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v2f32.p0")] - fn _vst1_f32_x4( - ptr: *mut f32, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - ); - } - _vst1_f32_x4(a, b.0, b.1, b.2, b.3) -} -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v4f32.p0")] - fn _vst1q_f32_x4( - ptr: *mut f32, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - ); - } - _vst1q_f32_x4(a, b.0, b.1, b.2, b.3) -} -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v2f32.p0" - )] - fn _vst1_f32_x4( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - ptr: *mut f32, - ); - } - _vst1_f32_x4(b.0, b.1, b.2, b.3, a) -} -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(test, assert_instr(st1))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v4f32.p0" - )] - fn _vst1q_f32_x4( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - ptr: *mut f32, - ); - } - _vst1q_f32_x4(b.0, b.1, b.2, b.3, a) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst1_lane_f16(a: *mut f16, b: float16x4_t) { - 
static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst1q_lane_f16<const LANE: i32>(a: *mut f16, b: float16x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59170,23 +60439,21 @@ pub unsafe fn vst1q_lane_f16<const LANE: i32>(a: *mut f16, b: float16x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { + vst1_s64_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59195,23 +60462,21 @@ pub unsafe fn vst1_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { + 
vst1_s64_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59220,23 +60485,21 @@ pub unsafe fn vst1q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { + vst1q_s64_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59245,23 +60508,21 @@ pub unsafe fn vst1_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16_t) { - static_assert_uimm_bits!(LANE, 4); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { + vst1q_s64_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] 
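+// The swapped `assert_instr` markers here drive stdarch's disassembly tests:
+// each `vst1*_x2/_x3/_x4` store must lower to a single VST1 on Arm and a
+// single ST1 on AArch64/arm64ec. Illustrative call of the x2 form (the
+// buffers are hypothetical, not part of this diff):
+//     let mut dst = [0u64; 4];
+//     vst1q_u64_x2(dst.as_mut_ptr(), vld1q_u64_x2(src.as_ptr()));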
#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59270,23 +60531,21 @@ pub unsafe fn vst1q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { + vst1q_s64_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59295,23 +60554,21 @@ pub unsafe fn vst1_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { + vst1_s8_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59320,23 +60577,21 @@ pub unsafe fn vst1q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { + vst1_s8_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59345,23 +60600,21 @@ pub unsafe fn vst1_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { + vst1_s8_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59370,23 +60623,21 @@ pub unsafe fn vst1q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { + vst1q_s8_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] 
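+// Note the per-architecture stability split carried by the two `cfg_attr`
+// wrappers that follow: these intrinsics have been stable on AArch64 since
+// Rust 1.59.0, while the 32-bit Arm versions remain unstable behind the
+// `stdarch_arm_neon_intrinsics` feature (tracking issue 111800).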
#[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59395,23 +60646,21 @@ pub unsafe fn vst1q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { + vst1q_s8_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59420,23 +60669,21 @@ pub unsafe fn vst1_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16_t) { - static_assert_uimm_bits!(LANE, 4); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { + vst1q_s8_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59445,23 +60692,21 @@ pub unsafe fn vst1q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { + vst1_s16_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)"] +#[doc = "Store multiple single-element structures to one, two, 
three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59470,23 +60715,21 @@ pub unsafe fn vst1_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { + vst1_s16_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59495,23 +60738,21 @@ pub unsafe fn vst1q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { + vst1_s16_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59520,23 +60761,21 @@ pub unsafe fn vst1_lane_u32<const LANE: i32>(a: *mut 
u32, b: uint32x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { + vst1q_s16_x2(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59545,23 +60784,21 @@ pub unsafe fn vst1q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { + vst1q_s16_x3(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)"] +#[doc = "Store multiple single-element structures to one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) + assert_instr(st1) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59570,287 +60807,198 @@ pub unsafe fn vst1q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { + vst1q_s16_x4(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
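+// All the polynomial and unsigned `vst1*` x2/x3/x4 stores above are thin
+// wrappers: they `transmute` the pointer and the register tuple to the
+// matching signed types and defer to the `vst1*_s8/_s16/_s32/_s64`
+// implementation. This is sound because the tuple types share one layout,
+// so the store writes exactly the same bytes.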
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64", ALIGN = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { - static_assert_uimm_bits!(LANE, 4); - *a = simd_extract!(b, LANE as u32); -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +unsafe fn vst1_v1i64(addr: *const i8, val: int64x1_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v1i64.p0")] + fn _vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32); + } + _vst1_v1i64(addr, val, ALIGN) +} #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32", ALIGN = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { - static_assert_uimm_bits!(LANE, 2); - *a = simd_extract!(b, LANE as u32); +unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2f32.p0")] + fn _vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32); + } + _vst1_v2f32(addr, val, ALIGN) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32", ALIGN = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { - static_assert_uimm_bits!(LANE, 3); - *a = simd_extract!(b, LANE as u32); +unsafe fn vst1_v2i32(addr: *const 
i8, val: int32x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i32.p0")] + fn _vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32); + } + _vst1_v2i32(addr, val, ALIGN) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16", ALIGN = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1_t) { - static_assert!(LANE == 0); - *a = simd_extract!(b, LANE as u32); +unsafe fn vst1_v4i16<const ALIGN: i32>(addr: *const i8, val: int16x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i16.p0")] + fn _vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32); + } + _vst1_v4i16(addr, val, ALIGN) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8", ALIGN = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1_t) { - static_assert!(LANE == 0); - *a = simd_extract!(b, LANE as u32); +unsafe fn vst1_v8i8<const ALIGN: i32>(addr: *const i8, val: int8x8_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i8.p0")] + fn _vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32); + } + _vst1_v8i8(addr, val, ALIGN) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8", ALIGN = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { - static_assert!(LANE == 0); - *a = simd_extract!(b, LANE as u32); +unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v16i8.p0")] + fn _vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32); + } + _vst1q_v16i8(addr, val, ALIGN) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x2)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { - vst1_s64_x2(transmute(a), transmute(b)) +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64", ALIGN = 0))] +#[rustc_legacy_const_generics(2)] +unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i64.p0")] + fn _vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32); + } + _vst1q_v2i64(addr, val, ALIGN) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x3)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { - vst1_s64_x3(transmute(a), transmute(b)) +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32", ALIGN = 0))] 
+#[rustc_legacy_const_generics(2)] +unsafe fn vst1q_v4f32<const ALIGN: i32>(addr: *const i8, val: float32x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4f32.p0")] + fn _vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32); + } + _vst1q_v4f32(addr, val, ALIGN) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { - vst1_s64_x4(transmute(a), transmute(b)) +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32", ALIGN = 0))] +#[rustc_legacy_const_generics(2)] +unsafe fn vst1q_v4i32<const ALIGN: i32>(addr: *const i8, val: int32x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i32.p0")] + fn _vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32); + } + _vst1q_v4i32(addr, val, ALIGN) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16", ALIGN = 0))] +#[rustc_legacy_const_generics(2)] +unsafe fn vst1q_v8i16<const ALIGN: i32>(addr: *const i8, val: int16x8_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i16.p0")] + fn _vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32); + } + _vst1q_v8i16(addr, val, ALIGN) +} +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v4f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { - vst1q_s64_x2(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = 
"neon,fp16")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +unsafe fn vst1_v4f16(addr: *const i8, val: float16x4_t, align: i32) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4f16.p0")] + fn _vst1_v4f16(addr: *const i8, val: float16x4_t, align: i32); + } + _vst1_v4f16(addr, val, align) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v8f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { - vst1q_s64_x3(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[target_feature(enable = "neon,fp16")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] +unsafe fn vst1q_v8f16(addr: *const i8, val: float16x8_t, align: i32) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8f16.p0")] + fn _vst1q_v8f16(addr: *const i8, val: float16x8_t, align: i32); + } + _vst1q_v8f16(addr, val, align) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)"] +#[doc = "Store multiple single-element structures from one, two, three, or four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -59859,861 +61007,688 @@ pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { - vst1q_s64_x4(transmute(a), transmute(b)) +pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { + static_assert_uimm_bits!(LANE, 1); + 
    *a = simd_extract!(b, LANE as u32);
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) {
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(st2))]
+pub unsafe fn vst2_f16(a: *mut f16, b: float16x4x2_t) {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x2.v8i8.p0"
+            link_name = "llvm.aarch64.neon.st2.v4f16.p0"
         )]
-        fn _vst1_s8_x2(a: int8x8_t, b: int8x8_t, ptr: *mut i8);
+        fn _vst2_f16(a: float16x4_t, b: float16x4_t, ptr: *mut i8);
     }
-    _vst1_s8_x2(b.0, b.1, a)
+    _vst2_f16(b.0, b.1, a as _)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) {
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(st2))]
+pub unsafe fn vst2q_f16(a: *mut f16, b: float16x8x2_t) {
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x2.v16i8.p0"
+            link_name = "llvm.aarch64.neon.st2.v8f16.p0"
         )]
-        fn _vst1q_s8_x2(a: int8x16_t, b: int8x16_t, ptr: *mut i8);
+        fn _vst2q_f16(a: float16x8_t, b: float16x8_t, ptr: *mut i8);
     }
-    _vst1q_s8_x2(b.0, b.1, a)
+    _vst2q_f16(b.0, b.1, a as _)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) {
+#[cfg(target_arch = "arm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(vst2))]
+pub unsafe fn vst2_f16(a: *mut f16, b: float16x4x2_t) {
     unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x2.v4i16.p0"
-        )]
-        fn _vst1_s16_x2(a: int16x4_t, b: int16x4_t, ptr: *mut i16);
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0.v4f16")]
+        fn _vst2_f16(ptr: *mut i8, a: float16x4_t, b: float16x4_t, size: i32);
     }
-    _vst1_s16_x2(b.0, b.1, a)
+    _vst2_f16(a as _, b.0, b.1, 2)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) {
+#[cfg(target_arch = "arm")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
+#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
+#[cfg(not(target_arch = "arm64ec"))]
+#[cfg_attr(test, assert_instr(vst2))]
+pub unsafe fn vst2q_f16(a: *mut f16, b: float16x8x2_t) {
     unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x2.v8i16.p0"
-        )]
-        fn _vst1q_s16_x2(a: int16x8_t, b: int16x8_t, ptr: *mut i16);
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0.v8f16")]
+        fn _vst2q_f16(ptr: *mut i8, a: float16x8_t, b: float16x8_t, size: i32);
     }
-    _vst1q_s16_x2(b.0, b.1, a)
+    _vst2q_f16(a as _, b.0, b.1, 2)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x2.v2i32.p0"
-        )]
-        fn _vst1_s32_x2(a: int32x2_t, b: int32x2_t, ptr: *mut i32);
-    }
-    _vst1_s32_x2(b.0, b.1, a)
+#[cfg_attr(test, assert_instr(st2))]
+pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) {
+    crate::core_arch::macros::interleaving_store!(f32, 2, 2, a, b)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x2.v4i32.p0"
-        )]
-        fn _vst1q_s32_x2(a: int32x4_t, b: int32x4_t, ptr: *mut i32);
-    }
-    _vst1q_s32_x2(b.0, b.1, a)
+#[cfg_attr(test, assert_instr(st2))]
+pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) {
+    crate::core_arch::macros::interleaving_store!(f32, 4, 2, a, b)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x2.v1i64.p0"
-        )]
-        fn _vst1_s64_x2(a: int64x1_t, b: int64x1_t, ptr: *mut i64);
-    }
-    _vst1_s64_x2(b.0, b.1, a)
+#[cfg_attr(test, assert_instr(st2))]
+pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) {
+    crate::core_arch::macros::interleaving_store!(i8, 8, 2, a, b)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x2.v2i64.p0"
-        )]
-        fn _vst1q_s64_x2(a: int64x2_t, b: int64x2_t, ptr: *mut i64);
-    }
-    _vst1q_s64_x2(b.0, b.1, a)
+#[cfg_attr(test, assert_instr(st2))]
+pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) {
+    crate::core_arch::macros::interleaving_store!(i8, 16, 2, a, b)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
-#[cfg(target_arch = "arm")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i8.p0")]
-        fn _vst1_s8_x2(ptr: *mut i8, a: int8x8_t, b: int8x8_t);
-    }
-    _vst1_s8_x2(a, b.0, b.1)
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(st2))]
+pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) {
+    crate::core_arch::macros::interleaving_store!(i16, 4, 2, a, b)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(st2))]
+pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) {
+    crate::core_arch::macros::interleaving_store!(i16, 8, 2, a, b)
+}
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(st2))]
+pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) {
+    crate::core_arch::macros::interleaving_store!(i32, 2, 2, a, b)
+}
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+#[cfg_attr(test, assert_instr(st2))]
+pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) {
+    crate::core_arch::macros::interleaving_store!(i32, 4, 2, a, b)
+}
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) {
+#[cfg_attr(test, assert_instr(vst2))]
+pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v16i8.p0")]
-        fn _vst1q_s8_x2(ptr: *mut i8, a: int8x16_t, b: int8x16_t);
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2f32.p0")]
+        fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32);
     }
-    _vst1q_s8_x2(a, b.0, b.1)
+    _vst2_f32(a as _, b.0, b.1, 4)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) {
+#[cfg_attr(test, assert_instr(vst2))]
+pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i16.p0")]
-        fn _vst1_s16_x2(ptr: *mut i16, a: int16x4_t, b: int16x4_t);
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4f32.p0")]
+        fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32);
     }
-    _vst1_s16_x2(a, b.0, b.1)
+    _vst2q_f32(a as _, b.0, b.1, 4)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) {
+#[cfg_attr(test, assert_instr(vst2))]
+pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v8i16.p0")]
-        fn _vst1q_s16_x2(ptr: *mut i16, a: int16x8_t, b: int16x8_t);
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i8.p0")]
+        fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32);
     }
-    _vst1q_s16_x2(a, b.0, b.1)
+    _vst2_s8(a as _, b.0, b.1, 1)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) {
+#[cfg_attr(test, assert_instr(vst2))]
+pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i32.p0")]
-        fn _vst1_s32_x2(ptr: *mut i32, a: int32x2_t, b: int32x2_t);
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v16i8.p0")]
+        fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32);
     }
-    _vst1_s32_x2(a, b.0, b.1)
+    _vst2q_s8(a as _, b.0, b.1, 1)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) {
+#[cfg_attr(test, assert_instr(vst2))]
+pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v4i32.p0")]
-        fn _vst1q_s32_x2(ptr: *mut i32, a: int32x4_t, b: int32x4_t);
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i16.p0")]
+        fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32);
     }
-    _vst1q_s32_x2(a, b.0, b.1)
+    _vst2_s16(a as _, b.0, b.1, 2)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[target_feature(enable = "neon,v7")]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) {
+#[cfg_attr(test, assert_instr(vst2))]
+pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) {
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v1i64.p0")]
-        fn _vst1_s64_x2(ptr: *mut i64, a: int64x1_t, b: int64x1_t);
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i16.p0")]
+        fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32);
     }
-    _vst1_s64_x2(a, b.0, b.1)
+    _vst2q_s16(a as _, b.0, b.1, 2)
 }
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"]
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
+#[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
+#[cfg_attr(test, assert_instr(vst2))]
+pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) {
+    unsafe extern "unadjusted" {
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2i32.p0")]
+        fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32);
+    }
+    _vst2_s32(a as _, b.0, b.1, 4)
+}
+#[doc = "Store multiple 2-element structures from two registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
 #[cfg(target_arch = "arm")]
+#[target_feature(enable = "neon,v7")]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
"stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { +#[cfg_attr(test, assert_instr(vst2))] +pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.v2i64.p0")] - fn _vst1q_s64_x2(ptr: *mut i64, a: int64x2_t, b: int64x2_t); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i32.p0")] + fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); } - _vst1q_s64_x2(a, b.0, b.1) + _vst2q_s32(a as _, b.0, b.1, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vst2_lane_f16(a: *mut f16, b: float16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v8i8.p0" + link_name = "llvm.aarch64.neon.st2lane.v4f16.p0" )] - fn _vst1_s8_x3(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8); + fn _vst2_lane_f16(a: float16x4_t, b: float16x4_t, n: i64, ptr: *mut i8); } - _vst1_s8_x3(b.0, b.1, b.2, a) + _vst2_lane_f16(b.0, b.1, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vst2q_lane_f16(a: *mut f16, b: float16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v16i8.p0" + link_name = "llvm.aarch64.neon.st2lane.v8f16.p0" )] - fn _vst1q_s8_x3(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8); + fn _vst2q_lane_f16(a: float16x8_t, b: float16x8_t, n: i64, ptr: *mut i8); } - _vst1q_s8_x3(b.0, b.1, b.2, a) + _vst2q_lane_f16(b.0, b.1, LANE as i64, a as _) } 
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vst2_lane_f16(a: *mut f16, b: float16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0.v4f16")] + fn _vst2_lane_f16(ptr: *mut i8, a: float16x4_t, b: float16x4_t, n: i32, size: i32); + } + _vst2_lane_f16(a as _, b.0, b.1, LANE, 2) +} +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vst2q_lane_f16(a: *mut f16, b: float16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0.v8f16")] + fn _vst2q_lane_f16(ptr: *mut i8, a: float16x8_t, b: float16x8_t, n: i32, size: i32); + } + _vst2q_lane_f16(a as _, b.0, b.1, LANE, 2) +} +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { +pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4i16.p0" + link_name = "llvm.aarch64.neon.st2lane.v2f32.p0" )] - fn _vst1_s16_x3(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i16); + fn _vst2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *mut i8); } - _vst1_s16_x3(b.0, b.1, b.2, a) + _vst2_lane_f32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { +pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v8i16.p0" + link_name = "llvm.aarch64.neon.st2lane.v4f32.p0" )] - fn _vst1q_s16_x3(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i16); + fn _vst2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *mut i8); } - _vst1q_s16_x3(b.0, b.1, b.2, a) + _vst2q_lane_f32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { +pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2i32.p0" + link_name = "llvm.aarch64.neon.st2lane.v8i8.p0" )] - fn _vst1_s32_x3(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i32); + fn _vst2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *mut i8); } - _vst1_s32_x3(b.0, b.1, b.2, a) + _vst2_lane_s8(b.0, b.1, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { +pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v4i32.p0" + link_name = "llvm.aarch64.neon.st2lane.v4i16.p0" )] - fn _vst1q_s32_x3(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i32); + fn _vst2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *mut i8); } - _vst1q_s32_x3(b.0, b.1, b.2, a) + 
_vst2_lane_s16(b.0, b.1, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { +pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v1i64.p0" + link_name = "llvm.aarch64.neon.st2lane.v8i16.p0" )] - fn _vst1_s64_x3(a: int64x1_t, b: int64x1_t, c: int64x1_t, ptr: *mut i64); + fn _vst2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *mut i8); } - _vst1_s64_x3(b.0, b.1, b.2, a) + _vst2q_lane_s16(b.0, b.1, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { +pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x3.v2i64.p0" + link_name = "llvm.aarch64.neon.st2lane.v2i32.p0" )] - fn _vst1q_s64_x3(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i64); + fn _vst2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *mut i8); } - _vst1q_s64_x3(b.0, b.1, b.2, a) + _vst2_lane_s32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st2, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe 
fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v8i8.p0")] - fn _vst1_s8_x3(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st2lane.v4i32.p0" + )] + fn _vst2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *mut i8); } - _vst1_s8_x3(a, b.0, b.1, b.2) + _vst2q_lane_s32(b.0, b.1, LANE as i64, a as _) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { +pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v16i8.p0")] - fn _vst1q_s8_x3(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2f32.p0")] + fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); } - _vst1q_s8_x3(a, b.0, b.1, b.2) + _vst2_lane_f32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { +pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v4i16.p0")] - fn _vst1_s16_x3(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4f32.p0")] + fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); } - _vst1_s16_x3(a, b.0, b.1, b.2) + _vst2q_lane_f32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { +pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v8i16.p0")] - fn _vst1q_s16_x3(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i8.p0")] + fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); } - _vst1q_s16_x3(a, b.0, b.1, b.2) + _vst2_lane_s8(a as _, b.0, b.1, LANE, 1) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { +pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v2i32.p0")] - fn _vst1_s32_x3(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i16.p0")] + fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); } - _vst1_s32_x3(a, b.0, b.1, b.2) + _vst2_lane_s16(a as _, b.0, b.1, LANE, 2) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { +pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v4i32.p0")] - fn _vst1q_s32_x3(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t); + #[cfg_attr(target_arch = "arm", link_name = 
"llvm.arm.neon.vst2lane.v8i16.p0")] + fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); } - _vst1q_s32_x3(a, b.0, b.1, b.2) + _vst2q_lane_s16(a as _, b.0, b.1, LANE, 2) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { +pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v1i64.p0")] - fn _vst1_s64_x3(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2i32.p0")] + fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); } - _vst1_s64_x3(a, b.0, b.1, b.2) + _vst2_lane_s32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst2, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { +pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0.v2i64.p0")] - fn _vst1q_s64_x3(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i32.p0")] + fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); } - _vst1q_s64_x3(a, b.0, b.1, b.2) + _vst2q_lane_s32(a as _, b.0, b.1, LANE, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st1))] -pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st1x4.v8i8.p0" - 
-        )]
-        fn _vst1_s8_x4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8);
-    }
-    _vst1_s8_x4(b.0, b.1, b.2, b.3, a)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x4.v16i8.p0"
-        )]
-        fn _vst1q_s8_x4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8);
-    }
-    _vst1q_s8_x4(b.0, b.1, b.2, b.3, a)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x4.v4i16.p0"
-        )]
-        fn _vst1_s16_x4(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i16);
-    }
-    _vst1_s16_x4(b.0, b.1, b.2, b.3, a)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x4.v8i16.p0"
-        )]
-        fn _vst1q_s16_x4(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i16);
-    }
-    _vst1q_s16_x4(b.0, b.1, b.2, b.3, a)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x4.v2i32.p0"
-        )]
-        fn _vst1_s32_x4(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i32);
-    }
-    _vst1_s32_x4(b.0, b.1, b.2, b.3, a)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x4.v4i32.p0"
-        )]
-        fn _vst1q_s32_x4(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i32);
-    }
-    _vst1q_s32_x4(b.0, b.1, b.2, b.3, a)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x4.v1i64.p0"
-        )]
-        fn _vst1_s64_x4(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, ptr: *mut i64);
-    }
-    _vst1_s64_x4(b.0, b.1, b.2, b.3, a)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(st1))]
-pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(
-            any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st1x4.v2i64.p0"
-        )]
-        fn _vst1q_s64_x4(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i64);
-    }
-    _vst1q_s64_x4(b.0, b.1, b.2, b.3, a)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v8i8.p0")]
-        fn _vst1_s8_x4(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t);
-    }
-    _vst1_s8_x4(a, b.0, b.1, b.2, b.3)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v16i8.p0")]
-        fn _vst1q_s8_x4(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t);
-    }
-    _vst1q_s8_x4(a, b.0, b.1, b.2, b.3)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v4i16.p0")]
-        fn _vst1_s16_x4(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t);
-    }
-    _vst1_s16_x4(a, b.0, b.1, b.2, b.3)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v8i16.p0")]
-        fn _vst1q_s16_x4(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t);
-    }
-    _vst1q_s16_x4(a, b.0, b.1, b.2, b.3)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v2i32.p0")]
-        fn _vst1_s32_x4(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t);
-    }
-    _vst1_s32_x4(a, b.0, b.1, b.2, b.3)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst1))]
-pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v4i32.p0")]
-        fn _vst1q_s32_x4(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t);
-    }
-    _vst1q_s32_x4(a, b.0, b.1, b.2, b.3)
-}
-#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
"stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v1i64.p0")] - fn _vst1_s64_x4(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t); - } - _vst1_s64_x4(a, b.0, b.1, b.2, b.3) -} -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst1))] -pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0.v2i64.p0")] - fn _vst1q_s64_x4(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t); - } - _vst1q_s64_x4(a, b.0, b.1, b.2, b.3) -} -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -60722,21 +61697,23 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { - vst1_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + vst2_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -60745,21 +61722,23 @@ pub unsafe fn vst1_u8_x2(a: *mut u8, b: 
uint8x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { - vst1_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + vst2_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -60768,21 +61747,23 @@ pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { - vst1_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + vst2q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -60791,21 +61772,23 @@ pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { - vst1q_s8_x2(transmute(a), transmute(b)) +pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { + static_assert_uimm_bits!(LANE, 1); + vst2_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -60814,21 +61797,23 @@ pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { - vst1q_s8_x3(transmute(a), transmute(b)) +pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + vst2q_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -60837,21 +61822,23 @@ pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { - vst1q_s8_x4(transmute(a), transmute(b)) +pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + vst2_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -60860,21 +61847,23 @@ pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { - vst1_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vst2_lane_p16(a: 
*mut p16, b: poly16x4x2_t) { + static_assert_uimm_bits!(LANE, 2); + vst2_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2, LANE = 0) )] +#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -60883,20 +61872,21 @@ pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { - vst1_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { + static_assert_uimm_bits!(LANE, 3); + vst2q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -60906,20 +61896,44 @@ pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { - vst1_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { + vst2_s64(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { + core::ptr::write_unaligned(a.cast(), b) +} +#[doc 
= "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { + core::ptr::write_unaligned(a.cast(), b) +} +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(nop) )] #[cfg_attr( not(target_arch = "arm"), @@ -60929,20 +61943,20 @@ pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { - vst1q_s16_x2(transmute(a), transmute(b)) +pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { + vst2_s64(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -60952,20 +61966,20 @@ pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { - vst1q_s16_x3(transmute(a), transmute(b)) +pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { + vst2_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2) )] #[cfg_attr( 
not(target_arch = "arm"), @@ -60975,20 +61989,20 @@ pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { - vst1q_s16_x4(transmute(a), transmute(b)) +pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { + vst2q_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -60998,20 +62012,20 @@ pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { - vst1_s32_x2(transmute(a), transmute(b)) +pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { + vst2_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -61021,20 +62035,20 @@ pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { - vst1_s32_x3(transmute(a), transmute(b)) +pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { + vst2q_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = 
"aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -61044,20 +62058,20 @@ pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { - vst1_s32_x4(transmute(a), transmute(b)) +pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { + vst2_s32(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -61067,20 +62081,20 @@ pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { - vst1q_s32_x2(transmute(a), transmute(b)) +pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { + vst2q_s32(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -61090,20 +62104,20 @@ pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { - vst1q_s32_x3(transmute(a), transmute(b)) +pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { + vst2_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] 
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -61113,20 +62127,20 @@ pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { - vst1q_s32_x4(transmute(a), transmute(b)) +pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { + vst2q_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x2)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -61136,20 +62150,20 @@ pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { - vst1_s64_x2(transmute(a), transmute(b)) +pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { + vst2_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x3)"] +#[doc = "Store multiple 2-element structures from two registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) + assert_instr(st2) )] #[cfg_attr( not(target_arch = "arm"), @@ -61159,2445 +62173,3117 @@ pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { - vst1_s64_x3(transmute(a), transmute(b)) +pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { + vst2q_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x4)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { - vst1_s64_x4(transmute(a), transmute(b)) +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_f16(a: *mut f16, b: float16x4x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0.v4f16")] + fn _vst3_f16(ptr: *mut i8, a: float16x4_t, b: float16x4_t, c: float16x4_t, size: i32); + } + _vst3_f16(a as _, b.0, b.1, b.2, 2) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] +#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { - vst1q_s64_x2(transmute(a), transmute(b)) +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_f16(a: *mut f16, b: float16x8x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0.v8f16")] + fn _vst3q_f16(ptr: *mut i8, a: float16x8_t, b: float16x8_t, c: float16x8_t, size: i32); + } + _vst3q_f16(a as _, b.0, b.1, b.2, 2) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") -)] -pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { - vst1q_s64_x3(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_f16(a: *mut f16, b: float16x4x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v4f16.p0" + )] + fn _vst3_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t, ptr: *mut i8); + } + _vst3_f16(b.0, b.1, b.2, a as _) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { - vst1q_s64_x4(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_f16(a: *mut f16, b: float16x8x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3.v8f16.p0" + )] + fn _vst3q_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t, ptr: *mut i8); + } + _vst3q_f16(b.0, b.1, b.2, a as _) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { - vst1_s8_x2(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_f32(a: *mut f32, b: 
float32x2x3_t) { + crate::core_arch::macros::interleaving_store!(f32, 2, 3, a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { - vst1_s8_x3(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { + crate::core_arch::macros::interleaving_store!(f32, 4, 3, a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { - vst1_s8_x4(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { + crate::core_arch::macros::interleaving_store!(i8, 8, 3, a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", 
since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { - vst1q_s8_x2(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { + crate::core_arch::macros::interleaving_store!(i8, 16, 3, a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { - vst1q_s8_x3(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { + crate::core_arch::macros::interleaving_store!(i16, 4, 3, a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { - vst1q_s8_x4(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { + crate::core_arch::macros::interleaving_store!(i16, 8, 3, a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { - vst1_s16_x2(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { + crate::core_arch::macros::interleaving_store!(i32, 2, 3, a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { - vst1_s16_x3(transmute(a), transmute(b)) +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst3))] +pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { + crate::core_arch::macros::interleaving_store!(i32, 4, 3, a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { - vst1_s16_x4(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] 
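// Review note, not part of the patch: interleaving_store!(ty, lanes, regs, ptr, val)
// is the shared lowering used for the non-lane vst3 intrinsics on both sides of the
// cfg split; its expansion lives in crate::core_arch::macros. Its observable effect
// is the interleaved store that st3/vst3 encodes. A rough scalar model of the
// 2-lane, 3-register f32 case (hypothetical helper, for intuition only):
//
//     unsafe fn st3_model(ptr: *mut f32, regs: [[f32; 2]; 3]) {
//         for lane in 0..2 {
//             for r in 0..3 {
//                 // lane `lane` of register `r` lands at ptr[lane * 3 + r]
//                 *ptr.add(lane * 3 + r) = regs[r][lane];
//             }
//         }
//     }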
+pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { + crate::core_arch::macros::interleaving_store!(f32, 2, 3, a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { - vst1q_s16_x2(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { + crate::core_arch::macros::interleaving_store!(f32, 4, 3, a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { - vst1q_s16_x3(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { + crate::core_arch::macros::interleaving_store!(i8, 8, 3, a, b) } -#[doc = "Store multiple single-element structures to one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st1) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - 
target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { - vst1q_s16_x4(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { + crate::core_arch::macros::interleaving_store!(i8, 16, 3, a, b) } +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64", ALIGN = 0))] -#[rustc_legacy_const_generics(2)] -unsafe fn vst1_v1i64(addr: *const i8, val: int64x1_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v1i64.p0")] - fn _vst1_v1i64(addr: *const i8, val: int64x1_t, align: i32); - } - _vst1_v1i64(addr, val, ALIGN) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { + crate::core_arch::macros::interleaving_store!(i16, 4, 3, a, b) } +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32", ALIGN = 0))] -#[rustc_legacy_const_generics(2)] -unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2f32.p0")] - fn _vst1_v2f32(addr: *const i8, val: float32x2_t, align: i32); - } - _vst1_v2f32(addr, val, ALIGN) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { + crate::core_arch::macros::interleaving_store!(i16, 8, 3, a, b) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { + crate::core_arch::macros::interleaving_store!(i32, 2, 3, a, b) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] 
+#[cfg_attr(test, assert_instr(st3))] +pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { + crate::core_arch::macros::interleaving_store!(i32, 4, 3, a, b) } +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32", ALIGN = 0))] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] -unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t) { +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vst3_lane_f16(a: *mut f16, b: float16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i32.p0")] - fn _vst1_v2i32(addr: *const i8, val: int32x2_t, align: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v4f16")] + fn _vst3_lane_f16( + ptr: *mut i8, + a: float16x4_t, + b: float16x4_t, + c: float16x4_t, + n: i32, + size: i32, + ); } - _vst1_v2i32(addr, val, ALIGN) + _vst3_lane_f16(a as _, b.0, b.1, b.2, LANE, 4) } +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16", ALIGN = 0))] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] -unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t) { +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vst3q_lane_f16(a: *mut f16, b: float16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i16.p0")] - fn _vst1_v4i16(addr: *const i8, val: int16x4_t, align: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v8f16")] + fn _vst3q_lane_f16( + ptr: *mut i8, + a: float16x8_t, + b: float16x8_t, + c: float16x8_t, + n: i32, + size: i32, + ); } - _vst1_v4i16(addr, val, ALIGN) + _vst3q_lane_f16(a as _, b.0, b.1, b.2, LANE, 4) } +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8", ALIGN = 0))] +#[cfg(not(target_arch = "arm"))] 
#[rustc_legacy_const_generics(2)] -unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t) { +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vst3_lane_f16(a: *mut f16, b: float16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i8.p0")] - fn _vst1_v8i8(addr: *const i8, val: int8x8_t, align: i32); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v4f16.p0" + )] + fn _vst3_lane_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t, n: i64, ptr: *mut i8); } - _vst1_v8i8(addr, val, ALIGN) + _vst3_lane_f16(b.0, b.1, b.2, LANE as i64, a as _) } +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.8", ALIGN = 0))] +#[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] -unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t) { +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +pub unsafe fn vst3q_lane_f16(a: *mut f16, b: float16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v16i8.p0")] - fn _vst1q_v16i8(addr: *const i8, val: int8x16_t, align: i32); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v8f16.p0" + )] + fn _vst3q_lane_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t, n: i64, ptr: *mut i8); } - _vst1q_v16i8(addr, val, ALIGN) + _vst3q_lane_f16(b.0, b.1, b.2, LANE as i64, a as _) } +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.64", ALIGN = 0))] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] -unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t) { +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v2i64.p0")] - fn _vst1q_v2i64(addr: *const i8, val: int64x2_t, align: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v2f32")] + fn _vst3_lane_f32( + ptr: *mut i8, + a: float32x2_t, + b: float32x2_t, + c: 
float32x2_t, + n: i32, + size: i32, + ); } - _vst1q_v2i64(addr, val, ALIGN) + _vst3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32", ALIGN = 0))] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] -unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t) { +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4f32.p0")] - fn _vst1q_v4f32(addr: *const i8, val: float32x4_t, align: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v4f32")] + fn _vst3q_lane_f32( + ptr: *mut i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + n: i32, + size: i32, + ); } - _vst1q_v4f32(addr, val, ALIGN) + _vst3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) } +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.32", ALIGN = 0))] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] -unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t) { +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4i32.p0")] - fn _vst1q_v4i32(addr: *const i8, val: int32x4_t, align: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v8i8")] + fn _vst3_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32); } - _vst1q_v4i32(addr, val, ALIGN) + _vst3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1) } +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16", ALIGN = 0))] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] -unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t) { +#[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8i16.p0")] - fn _vst1q_v8i16(addr: *const i8, val: int16x8_t, align: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v4i16")] + fn _vst3_lane_s16( + ptr: *mut i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + n: i32, + size: i32, + ); } - _vst1q_v8i16(addr, val, ALIGN) + _vst3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v4f16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[target_feature(enable = "neon,fp16")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -unsafe fn vst1_v4f16(addr: *const i8, val: float16x4_t, align: i32) { +pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v4f16.p0")] - fn _vst1_v4f16(addr: *const i8, val: float16x4_t, align: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v8i16")] + fn _vst3q_lane_s16( + ptr: *mut i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + n: i32, + size: i32, + ); } - _vst1_v4f16(addr, val, align) + _vst3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v8f16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[target_feature(enable = "neon,fp16")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vst1.16"))] -unsafe fn vst1q_v8f16(addr: *const i8, val: float16x8_t, align: i32) { +pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1.v8f16.p0")] - fn _vst1q_v8f16(addr: *const i8, val: float16x8_t, align: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v2i32")] + fn _vst3_lane_s32( + ptr: *mut i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + n: i32, + size: i32, + ); 
} - _vst1q_v8f16(addr, val, align) + _vst3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Store multiple single-element structures from one, two, three, or four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop, LANE = 0) -)] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { - static_assert_uimm_bits!(LANE, 1); - *a = simd_extract!(b, LANE as u32); +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v4i32")] + fn _vst3q_lane_s32( + ptr: *mut i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + n: i32, + size: i32, + ); + } + _vst3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_f16(a: *mut f16, b: float16x4x2_t) { +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v4f16.p0" + link_name = "llvm.aarch64.neon.st3lane.v2f32.p0" )] - fn _vst2_f16(a: float16x4_t, b: float16x4_t, ptr: *mut i8); + fn _vst3_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, n: i64, ptr: *mut i8); } - _vst2_f16(b.0, b.1, a as _) + _vst3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_f16(a: *mut f16, b: float16x8x2_t) { +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2.v8f16.p0" + link_name = "llvm.aarch64.neon.st3lane.v4f32.p0" )] - fn _vst2q_f16(a: float16x8_t, b: float16x8_t, ptr: *mut i8); + fn _vst3q_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, n: i64, ptr: *mut i8); } - _vst2q_f16(b.0, b.1, a as _) + _vst3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_f16(a: *mut f16, b: float16x4x2_t) { +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0.v4f16")] - fn _vst2_f16(ptr: *mut i8, a: float16x4_t, b: float16x4_t, size: i32); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v8i8.p0" + )] + fn _vst3_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i64, ptr: *mut i8); } - _vst2_f16(a as _, b.0, b.1, 2) + _vst3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_f16(a: *mut f16, b: 
float16x8x2_t) { +#[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0.v8f16")] - fn _vst2q_f16(ptr: *mut i8, a: float16x8_t, b: float16x8_t, size: i32); + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v4i16.p0" + )] + fn _vst3_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, n: i64, ptr: *mut i8); } - _vst2q_f16(a as _, b.0, b.1, 2) + _vst3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { - crate::core_arch::macros::interleaving_store!(f32, 2, 2, a, b) +pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v8i16.p0" + )] + fn _vst3q_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { - crate::core_arch::macros::interleaving_store!(f32, 4, 2, a, b) +pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v2i32.p0" + )] + fn _vst3_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, n: i64, ptr: *mut i8); + } + _vst3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## 
Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st3, LANE = 0))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { - crate::core_arch::macros::interleaving_store!(i8, 8, 2, a, b) +pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.st3lane.v4i32.p0" + )] + fn _vst3q_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64, ptr: *mut i8); + } + _vst3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { - crate::core_arch::macros::interleaving_store!(i8, 16, 2, a, b) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + vst3_lane_s8::(transmute(a), transmute(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] -#[doc = "## Safety"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)"] +#[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { - crate::core_arch::macros::interleaving_store!(i16, 4, 2, a, b) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { + static_assert_uimm_bits!(LANE, 2); + vst3_lane_s16::(transmute(a), 
transmute(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { - crate::core_arch::macros::interleaving_store!(i16, 8, 2, a, b) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { + static_assert_uimm_bits!(LANE, 3); + vst3q_lane_s16::(transmute(a), transmute(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { - crate::core_arch::macros::interleaving_store!(i32, 2, 2, a, b) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3, LANE = 0) +)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { + static_assert_uimm_bits!(LANE, 1); + vst3_lane_s32::(transmute(a), transmute(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st2))] -pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { - crate::core_arch::macros::interleaving_store!(i32, 4, 2, a, b) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
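// [Editor's note, not part of the patch: a minimal usage sketch for the
// `vst3_lane_*` intrinsics added above, assuming a NEON-enabled target; the
// helper name `demo` is hypothetical. `vst3_lane_s16::<LANE>` stores element
// `LANE` of each of the three input vectors to three contiguous `i16`s:
//
//     unsafe fn demo(out: *mut i16, v: int16x4x3_t) {
//         // writes v.0[1], v.1[1], v.2[1] to out[0], out[1], out[2]
//         vst3_lane_s16::<1>(out, v);
//     }
//
// `static_assert_uimm_bits!(LANE, 2)` rejects lanes outside 0..=3 at compile
// time, which is why LANE is a const generic rather than a runtime argument.
// The attribute stack of vst3q_lane_u32 continues below.]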
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(st3, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst3q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x3_t) {
+    static_assert_uimm_bits!(LANE, 2);
+    vst3q_lane_s32::<LANE>(transmute(a), transmute(b))
 }
-#[doc = "Store multiple 2-element structures from two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"]
+#[doc = "Store multiple 3-element structures from three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst2))]
-pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2f32.p0")]
-        fn _vst2_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32);
-    }
-    _vst2_f32(a as _, b.0, b.1, 4)
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(st3, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst3_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x3_t) {
+    static_assert_uimm_bits!(LANE, 3);
+    vst3_lane_s8::<LANE>(transmute(a), transmute(b))
 }
-#[doc = "Store multiple 2-element structures from two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"]
+#[doc = "Store multiple 3-element structures from three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst2))]
-pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4f32.p0")]
-        fn _vst2q_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32);
-    }
-    _vst2q_f32(a as _, b.0, b.1, 4)
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(st3, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst3_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x3_t) {
+    static_assert_uimm_bits!(LANE, 2);
+    vst3_lane_s16::<LANE>(transmute(a), transmute(b))
 }
-#[doc = "Store multiple 2-element structures from two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"]
+#[doc = "Store multiple 3-element structures from three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst2))]
-pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i8.p0")]
-        fn _vst2_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32);
-    }
-    _vst2_s8(a as _, b.0, b.1, 1)
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(st3, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst3q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x3_t) {
+    static_assert_uimm_bits!(LANE, 3);
+    vst3q_lane_s16::<LANE>(transmute(a), transmute(b))
 }
-#[doc = "Store multiple 2-element structures from two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"]
+#[doc = "Store multiple 3-element structures from three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p64)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(vst2))]
-pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) {
-    unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v16i8.p0")]
-        fn _vst2q_s8(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32);
-    }
-    _vst2q_s8(a as _, b.0, b.1, 1)
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[target_feature(enable = "neon,aes")]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(nop)
+)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) {
+    vst3_s64(transmute(a), transmute(b))
 }
-#[doc = "Store multiple 2-element structures from two registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"]
+#[doc = "Store multiple 3-element structures from three registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"]
 #[doc = "## Safety"]
Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i16.p0")] - fn _vst2_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32); - } - _vst2_s16(a as _, b.0, b.1, 2) +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { + core::ptr::write_unaligned(a.cast(), b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v8i16.p0")] - fn _vst2q_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32); - } - _vst2q_s16(a as _, b.0, b.1, 2) +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { + core::ptr::write_unaligned(a.cast(), b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v2i32.p0")] - fn _vst2_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32); - } - _vst2_s32(a as _, b.0, b.1, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { + vst3_s64(transmute(a), transmute(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst2))] -pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.v4i32.p0")] - fn _vst2q_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32); - } - _vst2q_s32(a as _, b.0, b.1, 4) +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { + vst3_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst2_lane_f16(a: *mut f16, b: float16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4f16.p0" - )] - fn _vst2_lane_f16(a: float16x4_t, b: float16x4_t, n: i64, ptr: *mut i8); - } - _vst2_lane_f16(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { + vst3q_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = 
"stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst2q_lane_f16(a: *mut f16, b: float16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v8f16.p0" - )] - fn _vst2q_lane_f16(a: float16x8_t, b: float16x8_t, n: i64, ptr: *mut i8); - } - _vst2q_lane_f16(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { + vst3_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst2_lane_f16(a: *mut f16, b: float16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0.v4f16")] - fn _vst2_lane_f16(ptr: *mut i8, a: float16x4_t, b: float16x4_t, n: i32, size: i32); - } - _vst2_lane_f16(a as _, b.0, b.1, LANE, 2) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { + vst3q_s16(transmute(a), transmute(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f16)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst2q_lane_f16(a: *mut f16, b: float16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); 
- unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0.v8f16")] - fn _vst2q_lane_f16(ptr: *mut i8, a: float16x8_t, b: float16x8_t, n: i32, size: i32); - } - _vst2q_lane_f16(a as _, b.0, b.1, LANE, 2) +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { + vst3_s32(transmute(a), transmute(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2f32.p0" - )] - fn _vst2_lane_f32(a: float32x2_t, b: float32x2_t, n: i64, ptr: *mut i8); - } - _vst2_lane_f32(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { + vst3q_s32(transmute(a), transmute(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4f32.p0" - )] - fn _vst2q_lane_f32(a: float32x4_t, b: float32x4_t, n: i64, ptr: *mut i8); - } - _vst2q_lane_f32(b.0, b.1, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + 
assert_instr(st3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { + vst3_s8(transmute(a), transmute(b)) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { + vst3q_s8(transmute(a), transmute(b)) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { + vst3_s16(transmute(a), transmute(b)) +} +#[doc = "Store multiple 3-element structures from three registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st3) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { + vst3q_s16(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] 
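// [Editor's note, not part of the patch: the `vst4_*` functions below store
// four registers fully interleaved. A sketch of the memory layout, assuming
// b = (A, B, C, D) with 4 lanes each:
//
//     unsafe fn demo(out: *mut i16, b: int16x4x4_t) {
//         // out[4*i + 0] = A[i], out[4*i + 1] = B[i],
//         // out[4*i + 2] = C[i], out[4*i + 3] = D[i]  for i in 0..4
//         vst4_s16(out, b);
//     }
//
// On the arm path these lower to `llvm.arm.neon.vst4.*` with an explicit
// element-size argument; on the aarch64 `st4` path the pointer comes last.
// The attribute stack of vst4_f16 continues below.]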
+#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_f16(a: *mut f16, b: float16x4x4_t) { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v8i8.p0" - )] - fn _vst2_lane_s8(a: int8x8_t, b: int8x8_t, n: i64, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v4f16")] + fn _vst4_f16( + ptr: *mut i8, + a: float16x4_t, + b: float16x4_t, + c: float16x4_t, + d: float16x4_t, + size: i32, + ); } - _vst2_lane_s8(b.0, b.1, LANE as i64, a as _) + _vst4_f16(a as _, b.0, b.1, b.2, b.3, 2) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_f16(a: *mut f16, b: float16x8x4_t) { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4i16.p0" - )] - fn _vst2_lane_s16(a: int16x4_t, b: int16x4_t, n: i64, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v8f16")] + fn _vst4q_f16( + ptr: *mut i8, + a: float16x8_t, + b: float16x8_t, + c: float16x8_t, + d: float16x8_t, + size: i32, + ); } - _vst2_lane_s16(b.0, b.1, LANE as i64, a as _) + _vst4q_f16(a as _, b.0, b.1, b.2, b.3, 2) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_f16(a: *mut f16, b: float16x4x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = 
"aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v8i16.p0" + link_name = "llvm.aarch64.neon.st4.v4f16.p0" )] - fn _vst2q_lane_s16(a: int16x8_t, b: int16x8_t, n: i64, ptr: *mut i8); + fn _vst4_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t, d: float16x4_t, ptr: *mut i8); } - _vst2q_lane_s16(b.0, b.1, LANE as i64, a as _) + _vst4_f16(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] +#[unstable(feature = "stdarch_neon_f16", issue = "136306")] +#[cfg(not(target_arch = "arm64ec"))] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_f16(a: *mut f16, b: float16x8x4_t) { unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v2i32.p0" + link_name = "llvm.aarch64.neon.st4.v8f16.p0" )] - fn _vst2_lane_s32(a: int32x2_t, b: int32x2_t, n: i64, ptr: *mut i8); + fn _vst4q_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t, d: float16x8_t, ptr: *mut i8); } - _vst2_lane_s32(b.0, b.1, LANE as i64, a as _) + _vst4q_f16(b.0, b.1, b.2, b.3, a as _) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st2, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st2lane.v4i32.p0" - )] - fn _vst2q_lane_s32(a: int32x4_t, b: int32x4_t, n: i64, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v2f32")] + fn _vst4_f32( + ptr: *mut i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + size: i32, + ); } - _vst2q_lane_s32(b.0, b.1, LANE as i64, a as _) + _vst4_f32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] +#[doc = "Store 
multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2f32.p0")] - fn _vst2_lane_f32(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v4f32")] + fn _vst4q_f32( + ptr: *mut i8, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + d: float32x4_t, + size: i32, + ); } - _vst2_lane_f32(a as _, b.0, b.1, LANE, 4) + _vst4q_f32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4f32.p0")] - fn _vst2q_lane_f32(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v8i8")] + fn _vst4_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, size: i32); } - _vst2q_lane_f32(a as _, b.0, b.1, LANE, 4) + _vst4_s8(a as _, b.0, b.1, b.2, b.3, 1) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i8.p0")] - fn _vst2_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v16i8")] + fn 
_vst4q_s8( + ptr: *mut i8, + a: int8x16_t, + b: int8x16_t, + c: int8x16_t, + d: int8x16_t, + size: i32, + ); } - _vst2_lane_s8(a as _, b.0, b.1, LANE, 1) + _vst4q_s8(a as _, b.0, b.1, b.2, b.3, 1) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i16.p0")] - fn _vst2_lane_s16(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v4i16")] + fn _vst4_s16( + ptr: *mut i8, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + d: int16x4_t, + size: i32, + ); } - _vst2_lane_s16(a as _, b.0, b.1, LANE, 2) + _vst4_s16(a as _, b.0, b.1, b.2, b.3, 2) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v8i16.p0")] - fn _vst2q_lane_s16(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v8i16")] + fn _vst4q_s16( + ptr: *mut i8, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + d: int16x8_t, + size: i32, + ); } - _vst2q_lane_s16(a as _, b.0, b.1, LANE, 2) + _vst4q_s16(a as _, b.0, b.1, b.2, b.3, 2) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) 
{ - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v2i32.p0")] - fn _vst2_lane_s32(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v2i32")] + fn _vst4_s32( + ptr: *mut i8, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + d: int32x2_t, + size: i32, + ); } - _vst2_lane_s32(a as _, b.0, b.1, LANE, 4) + _vst4_s32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst2, LANE = 0))] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(vst4))] +pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.v4i32.p0")] - fn _vst2q_lane_s32(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v4i32")] + fn _vst4q_s32( + ptr: *mut i8, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + d: int32x4_t, + size: i32, + ); } - _vst2q_lane_s32(a as _, b.0, b.1, LANE, 4) + _vst4q_s32(a as _, b.0, b.1, b.2, b.3, 4) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - vst2_lane_s8::(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { + crate::core_arch::macros::interleaving_store!(f32, 2, 4, a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - vst2_lane_s16::(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { + crate::core_arch::macros::interleaving_store!(f32, 4, 4, a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - vst2q_lane_s16::(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { + crate::core_arch::macros::interleaving_store!(i8, 8, 4, a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { - static_assert_uimm_bits!(LANE, 1); - 
vst2_lane_s32::(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { + crate::core_arch::macros::interleaving_store!(i8, 16, 4, a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - vst2q_lane_s32::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - vst2_lane_s8::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { - static_assert_uimm_bits!(LANE, 2); - vst2_lane_s16::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] -#[cfg_attr( - 
all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { - static_assert_uimm_bits!(LANE, 3); - vst2q_lane_s16::(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { - vst2_s64(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { - core::ptr::write_unaligned(a.cast(), b) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { - core::ptr::write_unaligned(a.cast(), b) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { - vst2_s64(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)"] 
-#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { - vst2_s8(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { - vst2q_s8(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { - vst2_s16(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { - vst2q_s16(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] 
-#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { - vst2_s32(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { - vst2q_s32(transmute(a), transmute(b)) -} -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { - vst2_s8(transmute(a), transmute(b)) +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { + crate::core_arch::macros::interleaving_store!(i16, 4, 4, a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { - vst2q_s8(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { + crate::core_arch::macros::interleaving_store!(i16, 8, 4, a, b) } -#[doc = "Store multiple 2-element structures from two registers"] 
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { - vst2_s16(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { + crate::core_arch::macros::interleaving_store!(i32, 2, 4, a, b) } -#[doc = "Store multiple 2-element structures from two registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st2) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { - vst2q_s16(transmute(a), transmute(b)) +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(st4))] +pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { + crate::core_arch::macros::interleaving_store!(i32, 4, 4, a, b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f16)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_f16(a: *mut f16, b: float16x4x3_t) { +pub unsafe fn vst4_lane_f16(a: *mut f16, b: float16x4x4_t) { + static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0.v4f16")] 
-#[doc = "Store multiple 3-element structures from three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f16)"]
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(target_arch = "arm")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(vst3))]
-pub unsafe fn vst3_f16(a: *mut f16, b: float16x4x3_t) {
+pub unsafe fn vst4_lane_f16<const LANE: i32>(a: *mut f16, b: float16x4x4_t) {
+    static_assert_uimm_bits!(LANE, 2);
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0.v4f16")]
-        fn _vst3_f16(ptr: *mut i8, a: float16x4_t, b: float16x4_t, c: float16x4_t, size: i32);
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v4f16")]
+        fn _vst4_lane_f16(
+            ptr: *mut i8,
+            a: float16x4_t,
+            b: float16x4_t,
+            c: float16x4_t,
+            d: float16x4_t,
+            n: i32,
+            size: i32,
+        );
     }
-    _vst3_f16(a as _, b.0, b.1, b.2, 2)
+    _vst4_lane_f16(a as _, b.0, b.1, b.2, b.3, LANE, 2)
 }
-#[doc = "Store multiple 3-element structures from three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f16)"]
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(target_arch = "arm")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
+#[rustc_legacy_const_generics(2)]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(vst3))]
-pub unsafe fn vst3q_f16(a: *mut f16, b: float16x8x3_t) {
+pub unsafe fn vst4q_lane_f16<const LANE: i32>(a: *mut f16, b: float16x8x4_t) {
+    static_assert_uimm_bits!(LANE, 3);
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0.v8f16")]
-        fn _vst3q_f16(ptr: *mut i8, a: float16x8_t, b: float16x8_t, c: float16x8_t, size: i32);
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v8f16")]
+        fn _vst4q_lane_f16(
+            ptr: *mut i8,
+            a: float16x8_t,
+            b: float16x8_t,
+            c: float16x8_t,
+            d: float16x8_t,
+            n: i32,
+            size: i32,
+        );
     }
-    _vst3q_f16(a as _, b.0, b.1, b.2, 2)
+    _vst4q_lane_f16(a as _, b.0, b.1, b.2, b.3, LANE, 2)
 }
-#[doc = "Store multiple 3-element structures from three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f16)"]
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))]
 #[unstable(feature = "stdarch_neon_f16", issue = "136306")]
 #[cfg(not(target_arch = "arm64ec"))]
-#[cfg_attr(test, assert_instr(st3))]
-pub unsafe fn vst3_f16(a: *mut f16, b: float16x4x3_t) {
+pub unsafe fn vst4_lane_f16<const LANE: i32>(a: *mut f16, b: float16x4x4_t) {
+    static_assert_uimm_bits!(LANE, 2);
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st3.v4f16.p0"
+            link_name = "llvm.aarch64.neon.st4lane.v4f16.p0"
        )]
-        fn _vst3_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t, ptr: *mut i8);
+        fn _vst4_lane_f16(
+            a: float16x4_t,
+            b: float16x4_t,
+            c: float16x4_t,
+            d: float16x4_t,
+            n: i64,
+            ptr: *mut i8,
+        );
     }
-    _vst3_f16(b.0, b.1, b.2, a as _)
+    _vst4_lane_f16(b.0, b.1, b.2, b.3, LANE as i64, a as _)
 }
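
The static_assert_uimm_bits!(LANE, N) guard in these lane variants rejects, at compile time, any lane index that does not fit in N unsigned bits (2 bits for a 4-lane vector, 3 bits for 8 lanes). A hypothetical stand-in for that check in plain const Rust, for illustration only; the real macro lives elsewhere in crates/core_arch:

    // Hypothetical stand-in: a const-evaluated bound check on a lane index.
    const fn assert_uimm_bits(lane: i32, bits: u32) {
        // Panicking in a const context aborts compilation, like the real macro.
        assert!(lane >= 0 && (lane as u32) < (1u32 << bits));
    }
    const _: () = assert_uimm_bits(3, 2); // lane 3 of a 4-lane vector: ok
    // const _: () = assert_uimm_bits(4, 2); // would fail to build
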
"Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(st4, LANE = 0))] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_f16(a: *mut f16, b: float16x8x3_t) { +pub unsafe fn vst4q_lane_f16(a: *mut f16, b: float16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3.v8f16.p0" + link_name = "llvm.aarch64.neon.st4lane.v8f16.p0" )] - fn _vst3q_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t, ptr: *mut i8); + fn _vst4q_lane_f16( + a: float16x8_t, + b: float16x8_t, + c: float16x8_t, + d: float16x8_t, + n: i64, + ptr: *mut i8, + ); } - _vst3q_f16(b.0, b.1, b.2, a as _) + _vst4q_lane_f16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - crate::core_arch::macros::interleaving_store!(f32, 2, 3, a, b) +pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { + static_assert_uimm_bits!(LANE, 1); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v2f32")] + fn _vst4_lane_f32( + ptr: *mut i8, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + d: float32x2_t, + n: i32, + size: i32, + ); + } + _vst4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - crate::core_arch::macros::interleaving_store!(f32, 4, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] 
-#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - crate::core_arch::macros::interleaving_store!(i8, 8, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - crate::core_arch::macros::interleaving_store!(i8, 16, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - crate::core_arch::macros::interleaving_store!(i16, 4, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - crate::core_arch::macros::interleaving_store!(i16, 8, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - crate::core_arch::macros::interleaving_store!(i32, 2, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst3))] -pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - crate::core_arch::macros::interleaving_store!(i32, 4, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { - 
crate::core_arch::macros::interleaving_store!(f32, 2, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { - crate::core_arch::macros::interleaving_store!(f32, 4, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { - crate::core_arch::macros::interleaving_store!(i8, 8, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { - crate::core_arch::macros::interleaving_store!(i8, 16, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { - crate::core_arch::macros::interleaving_store!(i16, 4, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { - crate::core_arch::macros::interleaving_store!(i16, 8, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { - crate::core_arch::macros::interleaving_store!(i32, 2, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] 
-#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st3))] -pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { - crate::core_arch::macros::interleaving_store!(i32, 4, 3, a, b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst3_lane_f16(a: *mut f16, b: float16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v4f16")] - fn _vst3_lane_f16( - ptr: *mut i8, - a: float16x4_t, - b: float16x4_t, - c: float16x4_t, - n: i32, - size: i32, - ); - } - _vst3_lane_f16(a as _, b.0, b.1, b.2, LANE, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst3q_lane_f16(a: *mut f16, b: float16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v8f16")] - fn _vst3q_lane_f16( - ptr: *mut i8, - a: float16x8_t, - b: float16x8_t, - c: float16x8_t, - n: i32, - size: i32, - ); - } - _vst3q_lane_f16(a as _, b.0, b.1, b.2, LANE, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst3_lane_f16(a: *mut f16, b: float16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v4f16.p0" - )] - fn _vst3_lane_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t, n: i64, ptr: *mut i8); - } - _vst3_lane_f16(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f16)"] -#[doc = "## Safety"] -#[doc = " 
* Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst3q_lane_f16(a: *mut f16, b: float16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v8f16.p0" - )] - fn _vst3q_lane_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t, n: i64, ptr: *mut i8); - } - _vst3q_lane_f16(b.0, b.1, b.2, LANE as i64, a as _) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v2f32")] - fn _vst3_lane_f32( - ptr: *mut i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - n: i32, - size: i32, - ); - } - _vst3_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { +pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { static_assert_uimm_bits!(LANE, 2); unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v4f32")] - fn _vst3q_lane_f32( + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v4f32")] + fn _vst4q_lane_f32( ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, + d: float32x4_t, n: i32, size: i32, ); } - _vst3q_lane_f32(a as _, b.0, b.1, b.2, LANE, 4) + _vst4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst3, LANE = 0))] +#[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { +pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { 
-#[doc = "Store multiple 3-element structures from three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"]
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vst3_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x3_t) {
+pub unsafe fn vst4_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x4_t) {
     static_assert_uimm_bits!(LANE, 3);
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v8i8")]
-        fn _vst3_lane_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32);
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v8i8")]
+        fn _vst4_lane_s8(
+            ptr: *mut i8,
+            a: int8x8_t,
+            b: int8x8_t,
+            c: int8x8_t,
+            d: int8x8_t,
+            n: i32,
+            size: i32,
+        );
     }
-    _vst3_lane_s8(a as _, b.0, b.1, b.2, LANE, 1)
+    _vst4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1)
 }
-#[doc = "Store multiple 3-element structures from three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"]
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vst3_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x3_t) {
+pub unsafe fn vst4_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x4_t) {
     static_assert_uimm_bits!(LANE, 2);
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v4i16")]
-        fn _vst3_lane_s16(
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v4i16")]
+        fn _vst4_lane_s16(
             ptr: *mut i8,
             a: int16x4_t,
             b: int16x4_t,
             c: int16x4_t,
+            d: int16x4_t,
             n: i32,
             size: i32,
         );
     }
-    _vst3_lane_s16(a as _, b.0, b.1, b.2, LANE, 2)
+    _vst4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2)
 }
-#[doc = "Store multiple 3-element structures from three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"]
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vst3q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x3_t) {
+pub unsafe fn vst4q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x4_t) {
     static_assert_uimm_bits!(LANE, 3);
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v8i16")]
-        fn _vst3q_lane_s16(
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v8i16")]
+        fn _vst4q_lane_s16(
             ptr: *mut i8,
             a: int16x8_t,
             b: int16x8_t,
             c: int16x8_t,
+            d: int16x8_t,
             n: i32,
             size: i32,
         );
     }
-    _vst3q_lane_s16(a as _, b.0, b.1, b.2, LANE, 2)
+    _vst4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2)
 }
-#[doc = "Store multiple 3-element structures from three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"]
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vst3_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x3_t) {
+pub unsafe fn vst4_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x4_t) {
     static_assert_uimm_bits!(LANE, 1);
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v2i32")]
-        fn _vst3_lane_s32(
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v2i32")]
+        fn _vst4_lane_s32(
             ptr: *mut i8,
             a: int32x2_t,
             b: int32x2_t,
             c: int32x2_t,
+            d: int32x2_t,
             n: i32,
             size: i32,
         );
     }
-    _vst3_lane_s32(a as _, b.0, b.1, b.2, LANE, 4)
+    _vst4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4)
 }
-#[doc = "Store multiple 3-element structures from three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"]
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[cfg(target_arch = "arm")]
 #[target_feature(enable = "neon,v7")]
-#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
+#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-pub unsafe fn vst3q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x3_t) {
+pub unsafe fn vst4q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x4_t) {
     static_assert_uimm_bits!(LANE, 2);
     unsafe extern "unadjusted" {
-        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0.v4i32")]
-        fn _vst3q_lane_s32(
+        #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v4i32")]
+        fn _vst4q_lane_s32(
             ptr: *mut i8,
             a: int32x4_t,
             b: int32x4_t,
             c: int32x4_t,
+            d: int32x4_t,
             n: i32,
             size: i32,
         );
     }
-    _vst3q_lane_s32(a as _, b.0, b.1, b.2, LANE, 4)
+    _vst4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4)
 }
-#[doc = "Store multiple 3-element structures from three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"]
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(test, assert_instr(st3, LANE = 0))]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vst3_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x3_t) {
+pub unsafe fn vst4_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x4_t) {
     static_assert_uimm_bits!(LANE, 1);
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st3lane.v2f32.p0"
+            link_name = "llvm.aarch64.neon.st4lane.v2f32.p0"
         )]
-        fn _vst3_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t, n: i64, ptr: *mut i8);
+        fn _vst4_lane_f32(
+            a: float32x2_t,
+            b: float32x2_t,
+            c: float32x2_t,
+            d: float32x2_t,
+            n: i64,
+            ptr: *mut i8,
+        );
     }
-    _vst3_lane_f32(b.0, b.1, b.2, LANE as i64, a as _)
+    _vst4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _)
 }
-#[doc = "Store multiple 3-element structures from three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"]
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(test, assert_instr(st3, LANE = 0))]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vst3q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x3_t) {
+pub unsafe fn vst4q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x4_t) {
     static_assert_uimm_bits!(LANE, 2);
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st3lane.v4f32.p0"
+            link_name = "llvm.aarch64.neon.st4lane.v4f32.p0"
        )]
-        fn _vst3q_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t, n: i64, ptr: *mut i8);
+        fn _vst4q_lane_f32(
+            a: float32x4_t,
+            b: float32x4_t,
+            c: float32x4_t,
+            d: float32x4_t,
+            n: i64,
+            ptr: *mut i8,
+        );
     }
-    _vst3q_lane_f32(b.0, b.1, b.2, LANE as i64, a as _)
+    _vst4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _)
 }
-#[doc = "Store multiple 3-element structures from three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"]
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(test, assert_instr(st3, LANE = 0))]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vst3_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x3_t) {
+pub unsafe fn vst4_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x4_t) {
     static_assert_uimm_bits!(LANE, 3);
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st3lane.v8i8.p0"
+            link_name = "llvm.aarch64.neon.st4lane.v8i8.p0"
         )]
-        fn _vst3_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i64, ptr: *mut i8);
+        fn _vst4_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, n: i64, ptr: *mut i8);
     }
-    _vst3_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
+    _vst4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
 }
-#[doc = "Store multiple 3-element structures from three registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"]
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"]
 #[doc = "## Safety"]
 #[doc = " * Neon intrinsic unsafe"]
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg(not(target_arch = "arm"))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(test, assert_instr(st3, LANE = 0))]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vst3_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x3_t) {
+pub unsafe fn vst4_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x4_t) {
     static_assert_uimm_bits!(LANE, 2);
     unsafe extern "unadjusted" {
         #[cfg_attr(
             any(target_arch = "aarch64", target_arch = "arm64ec"),
-            link_name = "llvm.aarch64.neon.st3lane.v4i16.p0"
+            link_name = "llvm.aarch64.neon.st4lane.v4i16.p0"
         )]
-        fn _vst3_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t, n: i64, ptr: *mut i8);
+        fn _vst4_lane_s16(
+            a: int16x4_t,
+            b: int16x4_t,
+            c: int16x4_t,
+            d: int16x4_t,
+            n: i64,
+            ptr: *mut i8,
+        );
     }
-    _vst3_lane_s16(b.0, b.1, b.2, LANE as i64, a as _)
+    _vst4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+}
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vst4q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x4_t) {
+    static_assert_uimm_bits!(LANE, 3);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st4lane.v8i16.p0"
+        )]
+        fn _vst4q_lane_s16(
+            a: int16x8_t,
+            b: int16x8_t,
+            c: int16x8_t,
+            d: int16x8_t,
+            n: i64,
+            ptr: *mut i8,
+        );
+    }
+    _vst4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+}
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vst4_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x4_t) {
+    static_assert_uimm_bits!(LANE, 1);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st4lane.v2i32.p0"
+        )]
+        fn _vst4_lane_s32(
+            a: int32x2_t,
+            b: int32x2_t,
+            c: int32x2_t,
+            d: int32x2_t,
+            n: i64,
+            ptr: *mut i8,
+        );
+    }
+    _vst4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+}
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg(not(target_arch = "arm"))]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(test, assert_instr(st4, LANE = 0))]
+#[stable(feature = "neon_intrinsics", since = "1.59.0")]
+pub unsafe fn vst4q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x4_t) {
+    static_assert_uimm_bits!(LANE, 2);
+    unsafe extern "unadjusted" {
+        #[cfg_attr(
+            any(target_arch = "aarch64", target_arch = "arm64ec"),
+            link_name = "llvm.aarch64.neon.st4lane.v4i32.p0"
+        )]
+        fn _vst4q_lane_s32(
+            a: int32x4_t,
+            b: int32x4_t,
+            c: int32x4_t,
+            d: int32x4_t,
+            n: i64,
+            ptr: *mut i8,
+        );
+    }
+    _vst4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _)
+}
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(st4, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst4_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x4_t) {
+    static_assert_uimm_bits!(LANE, 3);
+    vst4_lane_s8::<LANE>(transmute(a), transmute(b))
+}
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(st4, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst4_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x4_t) {
+    static_assert_uimm_bits!(LANE, 2);
+    vst4_lane_s16::<LANE>(transmute(a), transmute(b))
+}
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(st4, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst4q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x4_t) {
+    static_assert_uimm_bits!(LANE, 3);
+    vst4q_lane_s16::<LANE>(transmute(a), transmute(b))
+}
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(st4, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst4_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x4_t) {
+    static_assert_uimm_bits!(LANE, 1);
+    vst4_lane_s32::<LANE>(transmute(a), transmute(b))
+}
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(st4, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst4q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x4_t) {
+    static_assert_uimm_bits!(LANE, 2);
+    vst4q_lane_s32::<LANE>(transmute(a), transmute(b))
+}
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(st4, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst4_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x4_t) {
+    static_assert_uimm_bits!(LANE, 3);
+    vst4_lane_s8::<LANE>(transmute(a), transmute(b))
+}
+#[doc = "Store multiple 4-element structures from four registers"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)"]
+#[doc = "## Safety"]
+#[doc = " * Neon intrinsic unsafe"]
+#[inline]
+#[target_feature(enable = "neon")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(st4, LANE = 0)
+)]
+#[rustc_legacy_const_generics(2)]
+#[cfg_attr(
+    not(target_arch = "arm"),
+    stable(feature = "neon_intrinsics", since = "1.59.0")
+)]
+#[cfg_attr(
+    target_arch = "arm",
+    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+)]
+pub unsafe fn vst4_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x4_t) {
+    static_assert_uimm_bits!(LANE, 2);
+    vst4_lane_s16::<LANE>(transmute(a), transmute(b))
+}
+)] +pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { + static_assert_uimm_bits!(LANE, 3); + vst4q_lane_s16::(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,aes")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { + vst4_s64(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[cfg(target_arch = "arm")] +#[target_feature(enable = "neon,v7")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { + core::ptr::write_unaligned(a.cast(), b) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg(not(target_arch = "arm"))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { + core::ptr::write_unaligned(a.cast(), b) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u64)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { + vst4_s64(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { + vst4_s8(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { + vst4q_s8(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { + vst4_s16(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { + vst4q_s16(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { + vst4_s32(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { + vst4q_s32(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { + vst4_s8(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { + vst4q_s8(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { + vst4_s16(transmute(a), transmute(b)) +} +#[doc = "Store multiple 4-element structures from four registers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(st4) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { + vst4q_s16(transmute(a), transmute(b)) +} +#[doc = "Store SIMD&FP register (immediate offset)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstrq_p128)"] +#[doc = "## Safety"] +#[doc = " * Neon intrinsic unsafe"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(nop) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub unsafe fn vstrq_p128(a: *mut p128, b: p128) { + *a = b +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fsub) +)] +#[target_feature(enable = "neon,fp16")] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "stdarch_neon_fp16", since = "1.94.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vsub_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f16)"] +#[inline] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fsub) +)] +#[target_feature(enable = "neon,fp16")] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "stdarch_neon_fp16", since = "1.94.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg(not(target_arch = "arm64ec"))] +pub fn vsubq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe 
{ simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(fsub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") +)] +pub fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + 
target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + 
stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(sub) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { simd_sub(a, b) } +} +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)"] +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr( + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(subhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + let d = vsubhn_s16(b, c); + vcombine_s8(a, d) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v8i16.p0" - )] - fn _vst3q_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t, n: i64, ptr: *mut i8); - } - _vst3q_lane_s16(b.0, b.1, b.2, LANE as i64, a as
_) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr( + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(subhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + let d = vsubhn_s32(b, c); + vcombine_s16(a, d) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v2i32.p0" - )] - fn _vst3_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t, n: i64, ptr: *mut i8); - } - _vst3_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr( + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(subhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + let d = vsubhn_s64(b, c); + vcombine_s32(a, d) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st3, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst3q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st3lane.v4i32.p0" - )] - fn _vst3q_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64, ptr: *mut i8); - } - _vst3q_lane_s32(b.0, b.1, b.2, LANE as i64, a as _) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr( + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), +
assert_instr(subhn2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + let d = vsubhn_u16(b, c); + vcombine_u8(a, d) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(subhn2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -63606,23 +65292,24 @@ pub unsafe fn vst3q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - vst3_lane_s8::<LANE>(transmute(a), transmute(b)) +pub fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + let d = vsubhn_u32(b, c); + vcombine_u16(a, d) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(subhn2) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -63631,23 +65318,20 @@ pub unsafe fn vst3_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - vst3_lane_s16::<LANE>(transmute(a), transmute(b)) +pub fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + let d = vsubhn_u64(b, c); + vcombine_u32(a, d) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon
intrinsic unsafe"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(subhn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -63656,23 +65340,20 @@ pub unsafe fn vst3_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - vst3q_lane_s16::<LANE>(transmute(a), transmute(b)) +pub fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { + let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); + unsafe { simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(subhn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -63681,23 +65362,20 @@ pub unsafe fn vst3q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x3_t) { - static_assert_uimm_bits!(LANE, 1); - vst3_lane_s32::<LANE>(transmute(a), transmute(b)) +pub fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { + let c: i32x4 = i32x4::new(16, 16, 16, 16); + unsafe { simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(subhn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -63706,23 +65384,20 @@ pub unsafe fn
vst3_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - vst3q_lane_s32::<LANE>(transmute(a), transmute(b)) +pub fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { + let c: i64x2 = i64x2::new(32, 32); + unsafe { simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(subhn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -63731,23 +65406,20 @@ pub unsafe fn vst3q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - vst3_lane_s8::<LANE>(transmute(a), transmute(b)) +pub fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); + unsafe { simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Subtract returning high narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(subhn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -63756,23 +65428,20 @@ pub unsafe fn vst3_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x3_t) { - static_assert_uimm_bits!(LANE, 2); - vst3_lane_s16::<LANE>(transmute(a), transmute(b)) +pub fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + let c: u32x4 = u32x4::new(16, 16, 16, 16); + unsafe { simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Subtract returning high
narrow"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3, LANE = 0) + assert_instr(subhn) )] -#[rustc_legacy_const_generics(2)] #[cfg_attr( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") @@ -63781,21 +65450,19 @@ pub unsafe fn vst3_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x3_t) { - static_assert_uimm_bits!(LANE, 3); - vst3q_lane_s16::<LANE>(transmute(a), transmute(b)) +pub fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + let c: u64x2 = u64x2::new(32, 32); + unsafe { simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,aes")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ssubl) )] #[cfg_attr( not(target_arch = "arm"), @@ -63805,44 +65472,47 @@ pub unsafe fn vst3q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { - vst3_s64(transmute(a), transmute(b)) +pub fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { + unsafe { + let c: int16x8_t = simd_cast(a); + let d: int16x8_t = simd_cast(b); + simd_sub(c, d) + } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { - core::ptr::write_unaligned(a.cast(), b) -} -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(nop))] -pub unsafe
fn vst3_s64(a: *mut i64, b: int64x1x3_t) { - core::ptr::write_unaligned(a.cast(), b) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(ssubl) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe { + let c: int32x4_t = simd_cast(a); + let d: int32x4_t = simd_cast(b); + simd_sub(c, d) + } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u64)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(nop) + assert_instr(ssubl) )] #[cfg_attr( not(target_arch = "arm"), @@ -63852,20 +65522,22 @@ pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { - vst3_s64(transmute(a), transmute(b)) +pub fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe { + let c: int64x2_t = simd_cast(a); + let d: int64x2_t = simd_cast(b); + simd_sub(c, d) + } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(usubl) )] #[cfg_attr( not(target_arch = "arm"), @@ -63875,20 +65547,22 @@ pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { - vst3_s8(transmute(a), transmute(b)) +pub fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + unsafe { + let c: uint16x8_t = simd_cast(a); + let d: uint16x8_t = simd_cast(b); + simd_sub(c, d) + } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)"] #[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(usubl) )] #[cfg_attr( not(target_arch = "arm"), @@ -63898,20 +65572,22 @@ pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { - vst3q_s8(transmute(a), transmute(b)) +pub fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe { + let c: uint32x4_t = simd_cast(a); + let d: uint32x4_t = simd_cast(b); + simd_sub(c, d) + } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Unsigned Subtract Long"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(usubl) )] #[cfg_attr( not(target_arch = "arm"), @@ -63921,20 +65597,22 @@ pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { - vst3_s16(transmute(a), transmute(b)) +pub fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe { + let c: uint64x2_t = simd_cast(a); + let d: uint64x2_t = simd_cast(b); + simd_sub(c, d) + } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(ssubw) )] #[cfg_attr( not(target_arch = "arm"), @@ -63944,20 +65622,18 @@ pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { - vst3q_s16(transmute(a), transmute(b)) +pub fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { + unsafe { simd_sub(a, simd_cast(b)) } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(ssubw) )] #[cfg_attr( not(target_arch = "arm"), @@ -63967,20 +65643,18 @@ pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { - vst3_s32(transmute(a), transmute(b)) +pub fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { + unsafe { simd_sub(a, simd_cast(b)) } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Signed Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(ssubw) )] #[cfg_attr( not(target_arch = "arm"), @@ -63990,20 +65664,18 @@ pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { - vst3q_s32(transmute(a), transmute(b)) +pub fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { + unsafe { simd_sub(a, simd_cast(b)) } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(usubw) )] #[cfg_attr( not(target_arch = "arm"), @@ -64013,20 +65685,18 @@ pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { - vst3_s8(transmute(a), transmute(b)) +pub fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { + unsafe { simd_sub(a, simd_cast(b)) } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + assert_instr(usubw) )] #[cfg_attr( not(target_arch = "arm"), @@ -64036,1261 +65706,771 @@ pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { - vst3q_s8(transmute(a), transmute(b)) +pub fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe { simd_sub(a, simd_cast(b)) } } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Unsigned Subtract Wide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usubw) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe { simd_sub(a, simd_cast(b)) } +} +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)"] +#[inline] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(sudot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { - vst3_s16(transmute(a), transmute(b)) +pub fn vsudot_lane_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x8_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c = vreinterpret_u32_u8(c); + let c = vdup_lane_u32::<LANE>(c); + vusdot_s32(a, vreinterpret_u8_u32(c), b) } -#[doc = "Store multiple 3-element structures from three registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st3) + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(sudot, LANE = 0) )] +#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { - vst3q_s16(transmute(a), transmute(b)) +pub fn vsudotq_lane_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x8_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c = vreinterpret_u32_u8(c); + let c = vdupq_lane_u32::<LANE>(c); + vusdotq_s32(a, vreinterpretq_u8_u32(c), b) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_f16(a: *mut f16, b: float16x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v4f16")] - fn _vst4_f16( - ptr: *mut i8, - a: float16x4_t, - b: float16x4_t, - c: float16x4_t, - d: float16x4_t, - size: i32, - ); - } - _vst4_f16(a as _, b.0, b.1, b.2, b.3, 2) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 1))] +#[cfg_attr( + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(sudot, LANE = 3) )] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub fn vsudot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x16_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c = vreinterpretq_u32_u8(c); + let c = vdup_laneq_u32::<LANE>(c); + vusdot_s32(a, vreinterpret_u8_u32(c), b) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Dot product index form with signed and unsigned integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"] #[inline] 
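// The `vsudot_*` intrinsics added in this hunk wrap SUDOT, the mixed-sign dot
// product from the i8mm extension: each 32-bit lane of `a` accumulates four
// signed bytes of `b` multiplied by four unsigned bytes broadcast from lane
// `LANE` of `c`. A scalar sketch of that semantics, for reference only
// (`sudot_lane_model` is a hypothetical helper, not part of this diff):
fn sudot_lane_model(a: [i32; 2], b: [i8; 8], c: [u8; 8], lane: usize) -> [i32; 2] {
    // The four unsigned bytes selected by `lane`, reused for every output lane.
    let sel = &c[4 * lane..4 * lane + 4];
    let mut out = a;
    for (i, acc) in out.iter_mut().enumerate() {
        for j in 0..4 {
            // Mixed-sign widening multiply-accumulate: i8 * u8 -> i32.
            *acc += b[4 * i + j] as i32 * sel[j] as i32;
        }
    }
    out
}
// e.g. sudot_lane_model([0, 0], [1; 8], [2; 8], 0) == [8, 8]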
-#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_f16(a: *mut f16, b: float16x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v8f16")] - fn _vst4q_f16( - ptr: *mut i8, - a: float16x8_t, - b: float16x8_t, - c: float16x8_t, - d: float16x8_t, - size: i32, - ); - } - _vst4q_f16(a as _, b.0, b.1, b.2, b.3, 2) +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 1))] +#[cfg_attr( + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(sudot, LANE = 3) +)] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub fn vsudotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x16_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c = vreinterpretq_u32_u8(c); + let c = vdupq_laneq_u32::<LANE>(c); + vusdotq_s32(a, vreinterpretq_u8_u32(c), b) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_f16(a: *mut f16, b: float16x4x4_t) { +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v4f16.p0" - )] - fn _vst4_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t, d: float16x4_t, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl1")] + fn _vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t; } - _vst4_f16(b.0, b.1, b.2, b.3, a as _) + unsafe { _vtbl1(a, b) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_f16(a: *mut f16, b: float16x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch 
= "arm64ec"), - link_name = "llvm.aarch64.neon.st4.v8f16.p0" - )] - fn _vst4q_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t, d: float16x8_t, ptr: *mut i8); - } - _vst4q_f16(b.0, b.1, b.2, b.3, a as _) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v2f32")] - fn _vst4_f32( - ptr: *mut i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - size: i32, - ); - } - _vst4_f32(a as _, b.0, b.1, b.2, b.3, 4) +#[cfg_attr(test, assert_instr(vtbl))] +pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { + vtbl1(a, b) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] #[inline] +#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v4f32")] - fn _vst4q_f32( - ptr: *mut i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - size: i32, - ); - } - _vst4q_f32(a as _, b.0, b.1, b.2, b.3, 4) +#[cfg_attr(test, assert_instr(vtbl))] +pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { transmute(vtbl1(transmute(a), transmute(b))) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] #[inline] +#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v8i8")] - fn _vst4_s8(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, size: i32); - } - _vst4_s8(a as _, b.0, b.1, b.2, b.3, 1) +#[cfg_attr(test, assert_instr(vtbl))] +pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { + unsafe { transmute(vtbl1(transmute(a), transmute(b))) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2)"] #[inline] +#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { +#[cfg_attr(test, assert_instr(vtbl))] +fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v16i8")] - fn _vst4q_s8( - ptr: *mut i8, - a: int8x16_t, - b: int8x16_t, - c: int8x16_t, - d: int8x16_t, - size: i32, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl2")] + fn _vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; } - _vst4q_s8(a as _, b.0, b.1, b.2, b.3, 1) + unsafe { _vtbl2(a, b, c) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] #[inline] +#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v4i16")] - fn _vst4_s16( - ptr: *mut i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - size: i32, - ); - } - _vst4_s16(a as _, b.0, b.1, b.2, b.3, 2) +#[cfg_attr(test, assert_instr(vtbl))] +pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { + vtbl2(a.0, a.1, b) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] #[inline] +#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v8i16")] - fn _vst4q_s16( - ptr: *mut i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - size: i32, - ); - } - _vst4q_s16(a as _, b.0, b.1, b.2, b.3, 2) +#[cfg_attr(test, assert_instr(vtbl))] +pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + unsafe { transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] #[inline] +#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v2i32")] - fn _vst4_s32( - ptr: *mut i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - size: i32, - ); - } - _vst4_s32(a as _, b.0, b.1, b.2, b.3, 4) +#[cfg_attr(test, assert_instr(vtbl))] +pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + unsafe { transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3)"] #[inline] +#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vst4))] -pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { +#[cfg_attr(test, assert_instr(vtbl))] +fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0.v4i32")] - fn _vst4q_s32( - ptr: *mut i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - size: i32, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl3")] + fn _vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; } - _vst4q_s32(a as _, b.0, b.1, b.2, b.3, 4) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { - crate::core_arch::macros::interleaving_store!(f32, 2, 4, a, b) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { - crate::core_arch::macros::interleaving_store!(f32, 4, 4, a, b) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { - crate::core_arch::macros::interleaving_store!(i8, 8, 4, a, b) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { - crate::core_arch::macros::interleaving_store!(i8, 16, 4, a, b) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { - crate::core_arch::macros::interleaving_store!(i16, 4, 4, a, b) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { - crate::core_arch::macros::interleaving_store!(i16, 8, 4, a, b) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { - crate::core_arch::macros::interleaving_store!(i32, 2, 4, a, b) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -#[cfg_attr(test, assert_instr(st4))] -pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { - crate::core_arch::macros::interleaving_store!(i32, 4, 4, a, b) + unsafe { _vtbl3(a, b, c, d) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst4_lane_f16(a: *mut f16, b: float16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v4f16")] - fn _vst4_lane_f16( - ptr: *mut i8, - a: float16x4_t, - b: float16x4_t, - c: float16x4_t, - d: float16x4_t, - n: i32, - size: i32, - ); - } - _vst4_lane_f16(a as _, b.0, b.1, b.2, b.3, LANE, 2) -} -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { + vtbl3(a.0, a.1, a.2, b) +} +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst4q_lane_f16(a: *mut f16, b: float16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v8f16")] - fn _vst4q_lane_f16( - ptr: *mut i8, - a: float16x8_t, - b: float16x8_t, - c: float16x8_t, - d: float16x8_t, - n: i32, - size: i32, - ); +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + unsafe { + transmute(vtbl3( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(b), + )) } - _vst4q_lane_f16(a as _, b.0, b.1, b.2, b.3, LANE, 2) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst4_lane_f16(a: *mut f16, b: float16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4f16.p0" - )] - fn _vst4_lane_f16( - a: float16x4_t, 
- b: float16x4_t, - c: float16x4_t, - d: float16x4_t, - n: i64, - ptr: *mut i8, - ); +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + unsafe { + transmute(vtbl3( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(b), + )) } - _vst4_lane_f16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4)"] #[inline] #[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] -#[unstable(feature = "stdarch_neon_f16", issue = "136306")] -#[cfg(not(target_arch = "arm64ec"))] -pub unsafe fn vst4q_lane_f16(a: *mut f16, b: float16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); +#[cfg(target_arch = "arm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbl))] +fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v8f16.p0" - )] - fn _vst4q_lane_f16( - a: float16x8_t, - b: float16x8_t, - c: float16x8_t, - d: float16x8_t, - n: i64, - ptr: *mut i8, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl4")] + fn _vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; } - _vst4q_lane_f16(b.0, b.1, b.2, b.3, LANE as i64, a as _) + unsafe { _vtbl4(a, b, c, d, e) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] #[inline] +#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v2f32")] - fn _vst4_lane_f32( - ptr: *mut i8, - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i32, - size: i32, - ); - } - _vst4_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +#[cfg_attr(test, assert_instr(vtbl))] +pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { + vtbl4(a.0, a.1, a.2, a.3, b) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] #[inline] +#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v4f32")] - fn _vst4q_lane_f32( - ptr: *mut i8, - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i32, - size: i32, - ); +#[cfg_attr(test, assert_instr(vtbl))] +pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + unsafe { + transmute(vtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + transmute(b), + )) } - _vst4q_lane_f32(a as _, b.0, b.1, b.2, b.3, LANE, 4) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] #[inline] +#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] -#[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v8i8")] - fn _vst4_lane_s8( - ptr: *mut i8, - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - n: i32, - size: i32, - ); +#[cfg_attr(test, assert_instr(vtbl))] +pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { + unsafe { + transmute(vtbl4( + transmute(a.0), + transmute(a.1), + transmute(a.2), + transmute(a.3), + transmute(b), + )) } - _vst4_lane_s8(a as _, b.0, b.1, b.2, b.3, LANE, 1) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1)"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[cfg_attr(test, assert_instr(vtbx))] +fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v4i16")] - fn 
_vst4_lane_s16( - ptr: *mut i8, - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i32, - size: i32, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx1")] + fn _vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; } - _vst4_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) + unsafe { _vtbx1(a, b, c) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] #[inline] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + vtbx1(a, b, c) +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] +#[inline] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v8i16")] - fn _vst4q_lane_s16( - ptr: *mut i8, - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i32, - size: i32, - ); - } - _vst4q_lane_s16(a as _, b.0, b.1, b.2, b.3, LANE, 2) +#[cfg_attr(test, assert_instr(vtbx))] +pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + unsafe { transmute(vtbx1(transmute(a), transmute(b), transmute(c))) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] #[inline] +#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { + unsafe { transmute(vtbx1(transmute(a), transmute(b), transmute(c))) } +} +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2)"] +#[inline] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); +#[cfg_attr(test, assert_instr(vtbx))] +fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v2i32")] - fn _vst4_lane_s32( - ptr: *mut i8, - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i32, - size: i32, - ); + 
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx2")] + fn _vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; } - _vst4_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) + unsafe { _vtbx2(a, b, c, d) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] #[inline] -#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr(vst4, LANE = 0))] -#[rustc_legacy_const_generics(2)] +#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0.v4i32")] - fn _vst4q_lane_s32( - ptr: *mut i8, - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i32, - size: i32, - ); - } - _vst4q_lane_s32(a as _, b.0, b.1, b.2, b.3, LANE, 4) +#[cfg_attr(test, assert_instr(vtbx))] +pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { + vtbx2(a, b.0, b.1, c) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2f32.p0" - )] - fn _vst4_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - d: float32x2_t, - n: i64, - ptr: *mut i8, - ); +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { + unsafe { + transmute(vtbx2( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(c), + )) } - _vst4_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - 
unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4f32.p0" - )] - fn _vst4q_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - d: float32x4_t, - n: i64, - ptr: *mut i8, - ); +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { + unsafe { + transmute(vtbx2( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(c), + )) } - _vst4q_lane_f32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v8i8.p0" - )] - fn _vst4_lane_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, n: i64, ptr: *mut i8); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx3")] + fn _vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; } - _vst4_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _) + unsafe { _vtbx3(a, b, c, d, e) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4i16.p0" - )] - fn _vst4_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - d: int16x4_t, - n: i64, - ptr: *mut i8, - ); - } - _vst4_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { + vtbx3(a, b.0, b.1, b.2, c) } -#[doc = "Store multiple 4-element structures from 
four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v8i16.p0" - )] - fn _vst4q_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - d: int16x8_t, - n: i64, - ptr: *mut i8, - ); +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { + unsafe { + transmute(vtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(c), + )) } - _vst4q_lane_s16(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v2i32.p0" - )] - fn _vst4_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - d: int32x2_t, - n: i64, - ptr: *mut i8, - ); +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { + unsafe { + transmute(vtbx3( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(c), + )) } - _vst4_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg(not(target_arch = "arm"))] -#[rustc_legacy_const_generics(2)] -#[cfg_attr(test, assert_instr(st4, LANE = 0))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { - static_assert_uimm_bits!(LANE, 2); +#[target_feature(enable = "neon,v7")] 
+#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +fn vtbx4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t, f: int8x8_t) -> int8x8_t { unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.st4lane.v4i32.p0" - )] - fn _vst4q_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - d: int32x4_t, - n: i64, - ptr: *mut i8, - ); + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx4")] + fn _vtbx4( + a: int8x8_t, + b: int8x8_t, + c: int8x8_t, + d: int8x8_t, + e: int8x8_t, + f: int8x8_t, + ) -> int8x8_t; } - _vst4q_lane_s32(b.0, b.1, b.2, b.3, LANE as i64, a as _) + unsafe { _vtbx4(a, b, c, d, e, f) } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - vst4_lane_s8::(transmute(a), transmute(b)) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { + unsafe { + vtbx4( + a, + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + ) + } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { - static_assert_uimm_bits!(LANE, 2); - vst4_lane_s16::(transmute(a), transmute(b)) +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, 
assert_instr(vtbx))] +pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { + let mut b: int8x8x4_t = b; + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int8x8_t = vtbx4( + a, + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + c, + ); + simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) + } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { - static_assert_uimm_bits!(LANE, 3); - vst4q_lane_s16::(transmute(a), transmute(b)) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { + unsafe { + transmute(vtbx4( + transmute(a), + transmute(b.0), + transmute(b.1), + transmute(b.2), + transmute(b.3), + transmute(c), + )) + } } -#[doc = "Store multiple 4-element structures from four registers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)"] -#[doc = "## Safety"] -#[doc = " * Neon intrinsic unsafe"] +#[doc = "Extended table look-up"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] #[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(st4, LANE = 0) -)] -#[rustc_legacy_const_generics(2)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { - static_assert_uimm_bits!(LANE, 1); - vst4_lane_s32::(transmute(a), transmute(b)) +#[target_feature(enable = "neon,v7")] +#[cfg(target_arch = "arm")] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] +#[cfg_attr(test, assert_instr(vtbx))] +pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { + unsafe { + transmute(vtbx4( + 
+        transmute(a),
+        transmute(b.0),
+        transmute(b.1),
+        transmute(b.2),
+        transmute(b.3),
+        transmute(c),
+    ))
+    }
 }
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f16)"]
 #[inline]
-#[target_feature(enable = "neon")]
+#[cfg(target_endian = "little")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4, LANE = 0)
-)]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+    assert_instr(trn1)
 )]
-pub unsafe fn vst4q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x4_t) {
-    static_assert_uimm_bits!(LANE, 2);
-    vst4q_lane_s32::<LANE>(transmute(a), transmute(b))
-}
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4, LANE = 0)
+    assert_instr(trn2)
 )]
-#[rustc_legacy_const_generics(2)]
+#[target_feature(enable = "neon,fp16")]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vst4_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x4_t) {
-    static_assert_uimm_bits!(LANE, 3);
-    vst4_lane_s8::<LANE>(transmute(a), transmute(b))
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vtrn_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t {
+    unsafe {
+        let a1: float16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+        let b1: float16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
+        transmute((a1, b1))
+    }
 }
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f16)"]
 #[inline]
-#[target_feature(enable = "neon")]
+#[cfg(target_endian = "big")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4, LANE = 0)
-)]
-#[rustc_legacy_const_generics(2)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+    assert_instr(trn1)
 )]
-pub unsafe fn vst4_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x4_t) {
-    static_assert_uimm_bits!(LANE, 2);
-    vst4_lane_s16::<LANE>(transmute(a), transmute(b))
-}
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p16)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4, LANE = 0)
+    assert_instr(trn2)
 )]
-#[rustc_legacy_const_generics(2)]
+#[target_feature(enable = "neon,fp16")]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vst4q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x4_t) {
-    static_assert_uimm_bits!(LANE, 3);
-    vst4q_lane_s16::<LANE>(transmute(a), transmute(b))
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vtrn_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t {
+    unsafe {
+        let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let a1: float16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+        let b1: float16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
+        let mut ret_val: float16x4x2_t = transmute((a1, b1));
+        ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]);
+        ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]);
+        ret_val
+    }
 }
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p64)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f16)"]
 #[inline]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
-#[target_feature(enable = "neon,aes")]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg(target_endian = "little")]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+    assert_instr(trn1)
 )]
-pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) {
-    vst4_s64(transmute(a), transmute(b))
-}
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,v7")]
-#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")]
-#[cfg_attr(test, assert_instr(nop))]
-pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) {
-    core::ptr::write_unaligned(a.cast(), b)
-}
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg(not(target_arch = "arm"))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-#[cfg_attr(test, assert_instr(nop))]
-pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) {
-    core::ptr::write_unaligned(a.cast(), b)
-}
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u64)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(trn2)
 )]
+#[target_feature(enable = "neon,fp16")]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) {
-    vst4_s64(transmute(a), transmute(b))
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vtrnq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t {
+    unsafe {
+        let a1: float16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
+        let b1: float16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
+        transmute((a1, b1))
+    }
 }
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f16)"]
 #[inline]
-#[target_feature(enable = "neon")]
+#[cfg(target_endian = "big")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+    assert_instr(trn1)
 )]
-pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) {
-    vst4_s8(transmute(a), transmute(b))
-}
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4)
+    assert_instr(trn2)
 )]
+#[target_feature(enable = "neon,fp16")]
 #[cfg_attr(
     not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
+    stable(feature = "stdarch_neon_fp16", since = "1.94.0")
 )]
 #[cfg_attr(
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) {
-    vst4q_s8(transmute(a), transmute(b))
+#[cfg(not(target_arch = "arm64ec"))]
+pub fn vtrnq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t {
+    unsafe {
+        let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let a1: float16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
+        let b1: float16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
+        let mut ret_val: float16x8x2_t = transmute((a1, b1));
+        ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]);
+        ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]);
+        ret_val
+    }
 }
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+    assert_instr(zip1)
 )]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4)
+    assert_instr(zip2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65300,20 +66480,27 @@ pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) {
-    vst4q_s16(transmute(a), transmute(b))
+pub fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
+    unsafe {
+        let a1: float32x2_t = simd_shuffle!(a, b, [0, 2]);
+        let b1: float32x2_t = simd_shuffle!(a, b, [1, 3]);
+        transmute((a1, b1))
+    }
 }
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4)
+    assert_instr(zip1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(zip2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65323,20 +66510,32 @@ pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) {
-    vst4_s32(transmute(a), transmute(b))
+pub fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
+    unsafe {
+        let a: float32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: float32x2_t = simd_shuffle!(b, b, [1, 0]);
+        let a1: float32x2_t = simd_shuffle!(a, b, [0, 2]);
+        let b1: float32x2_t = simd_shuffle!(a, b, [1, 3]);
+        let mut ret_val: float32x2x2_t = transmute((a1, b1));
+        ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]);
+        ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]);
+        ret_val
+    }
 }
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4)
+    assert_instr(zip1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(zip2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65346,20 +66545,27 @@ pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) {
-    vst4q_s32(transmute(a), transmute(b))
+pub fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
+    unsafe {
+        let a1: int32x2_t = simd_shuffle!(a, b, [0, 2]);
+        let b1: int32x2_t = simd_shuffle!(a, b, [1, 3]);
+        transmute((a1, b1))
+    }
 }
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4)
+    assert_instr(zip1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(zip2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65369,20 +66575,32 @@ pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) {
-    vst4_s8(transmute(a), transmute(b))
+pub fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
+    unsafe {
+        let a: int32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: int32x2_t = simd_shuffle!(b, b, [1, 0]);
+        let a1: int32x2_t = simd_shuffle!(a, b, [0, 2]);
+        let b1: int32x2_t = simd_shuffle!(a, b, [1, 3]);
+        let mut ret_val: int32x2x2_t = transmute((a1, b1));
+        ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]);
+        ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]);
+        ret_val
+    }
 }
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4)
+    assert_instr(zip1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(zip2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65392,20 +66610,27 @@ pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) {
-    vst4q_s8(transmute(a), transmute(b))
+pub fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
+    unsafe {
+        let a1: uint32x2_t = simd_shuffle!(a, b, [0, 2]);
+        let b1: uint32x2_t = simd_shuffle!(a, b, [1, 3]);
+        transmute((a1, b1))
+    }
 }
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4)
+    assert_instr(zip1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(zip2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65415,20 +66640,32 @@ pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) {
-    vst4_s16(transmute(a), transmute(b))
+pub fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
+    unsafe {
+        let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]);
+        let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]);
+        let a1: uint32x2_t = simd_shuffle!(a, b, [0, 2]);
+        let b1: uint32x2_t = simd_shuffle!(a, b, [1, 3]);
+        let mut ret_val: uint32x2x2_t = transmute((a1, b1));
+        ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]);
+        ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]);
+        ret_val
+    }
 }
-#[doc = "Store multiple 4-element structures from four registers"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(st4)
+    assert_instr(trn1)
+)]
+#[cfg_attr(
    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65438,20 +66675,27 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) {
-    vst4q_s16(transmute(a), transmute(b))
+pub fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
+    unsafe {
+        let a1: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+        let b1: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
+        transmute((a1, b1))
+    }
 }
-#[doc = "Store SIMD&FP register (immediate offset)"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstrq_p128)"]
-#[doc = "## Safety"]
-#[doc = " * Neon intrinsic unsafe"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(nop)
+    assert_instr(trn1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65461,62 +66705,62 @@ pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub unsafe fn vstrq_p128(a: *mut p128, b: p128) {
-    *a = b
+pub fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
+    unsafe {
+        let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let a1: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+        let b1: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
+        let mut ret_val: float32x4x2_t = transmute((a1, b1));
+        ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]);
+        ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]);
+        ret_val
+    }
 }
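Editor's note (not part of the patch): a minimal usage sketch of the transpose semantics the vtrn additions above implement. It assumes an aarch64 host with NEON; the demo function name is illustrative, and the intrinsics are the stable ones from core::arch::aarch64.

#[cfg(target_arch = "aarch64")]
fn vtrn_demo() {
    use core::arch::aarch64::*;
    unsafe {
        let a = vld1_s32([0i32, 1].as_ptr()); // lanes of a = [0, 1]
        let b = vld1_s32([2i32, 3].as_ptr()); // lanes of b = [2, 3]
        let t = vtrn_s32(a, b);
        // Even lanes interleave into t.0 (trn1/zip1), odd lanes into t.1 (trn2/zip2):
        assert_eq!(vget_lane_s32::<0>(t.0), 0);
        assert_eq!(vget_lane_s32::<1>(t.0), 2);
        assert_eq!(vget_lane_s32::<0>(t.1), 1);
        assert_eq!(vget_lane_s32::<1>(t.1), 3);
    }
}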
assert_instr("vsub.f16"))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fsub) -)] -#[target_feature(enable = "neon,fp16")] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") + assert_instr(trn1) )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vsub_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { - unsafe { simd_sub(a, b) } -} -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f16)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fsub) + assert_instr(trn2) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vsubq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { - unsafe { simd_sub(a, b) } +pub fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + unsafe { + let a1: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) + } } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fsub) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65526,18 +66770,32 @@ pub fn vsubq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { - unsafe { simd_sub(a, b) } +pub fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a1: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + let mut ret_val: int8x8x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val + } } -#[doc = "Subtract"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(fsub) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65547,18 +66805,35 @@ pub fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { - unsafe { simd_sub(a, b) } +pub fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + unsafe { + let a1: int8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + let b1: int8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + transmute((a1, b1)) + } } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65568,18 +66843,50 @@ pub fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { - unsafe { simd_sub(a, b) } +pub fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let a1: int8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + let b1: int8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + let mut ret_val: int8x16x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val + } } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65589,18 +66896,27 @@ pub fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { - unsafe { simd_sub(a, b) } +pub fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + unsafe { + let a1: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) + } } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65610,18 +66926,32 @@ pub fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - unsafe { simd_sub(a, b) } +pub fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a1: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + let mut ret_val: int16x4x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val + } } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65631,18 +66961,27 
@@ pub fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { - unsafe { simd_sub(a, b) } +pub fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + unsafe { + let a1: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) + } } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65652,18 +66991,32 @@ pub fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { - unsafe { simd_sub(a, b) } +pub fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a1: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + let mut ret_val: int16x8x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val + } } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65673,18 +67026,27 @@ pub fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { - unsafe { simd_sub(a, b) } +pub fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + unsafe { + let a1: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) + } } -#[doc = "Subtract"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65694,18 +67056,32 @@ pub fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - unsafe { simd_sub(a, b) } +pub fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a1: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + let mut ret_val: int32x4x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val + } } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65715,18 +67091,27 @@ pub fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { - unsafe { simd_sub(a, b) } +pub fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + unsafe { + let a1: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) + } } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s64)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + 
assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65736,18 +67121,32 @@ pub fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { - unsafe { simd_sub(a, b) } +pub fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a1: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + let mut ret_val: uint8x8x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val + } } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65757,18 +67156,35 @@ pub fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { - unsafe { simd_sub(a, b) } +pub fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + unsafe { + let a1: uint8x16_t = simd_shuffle!( + a, + b, + [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + ); + let b1: uint8x16_t = simd_shuffle!( + a, + b, + [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + ); + transmute((a1, b1)) + } } -#[doc = "Subtract"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u64)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(sub) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -65778,39 +67194,50 @@ pub fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { - unsafe { simd_sub(a, b) } +pub fn vtrnq_u8(a: uint8x16_t, b: 
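Editor's note (not part of the patch): every big-endian variant above follows one mechanical pattern: reverse the lanes of each input, apply the little-endian shuffle, then reverse the lanes of each result. A portable sketch of that lowering for the 4-lane trn1 case, using plain arrays in place of NEON registers (function and variable names are illustrative):

fn trn1_big_endian(a: [i32; 4], b: [i32; 4]) -> [i32; 4] {
    let rev = |v: [i32; 4]| [v[3], v[2], v[1], v[0]];
    // Reverse lanes so indices line up with the little-endian register model.
    let (a, b) = (rev(a), rev(b));
    // The little-endian trn1 index pattern [0, 4, 2, 6] over the concatenation a ++ b.
    let pick = |i: usize| if i < 4 { a[i] } else { b[i - 4] };
    let out = [pick(0), pick(4), pick(2), pick(6)];
    // Reverse back into big-endian lane order.
    rev(out)
}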
-#[doc = "Subtract"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sub)
+    assert_instr(trn1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65757,18 +67156,35 @@ pub fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
-    unsafe { simd_sub(a, b) }
+pub fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
+    unsafe {
+        let a1: uint8x16_t = simd_shuffle!(
+            a,
+            b,
+            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
+        );
+        let b1: uint8x16_t = simd_shuffle!(
+            a,
+            b,
+            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
+        );
+        transmute((a1, b1))
+    }
 }
-#[doc = "Subtract"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u64)"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sub)
+    assert_instr(trn1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65778,39 +67194,50 @@ pub fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
-    unsafe { simd_sub(a, b) }
+pub fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
+    unsafe {
+        let a: uint8x16_t =
+            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: uint8x16_t =
+            simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let a1: uint8x16_t = simd_shuffle!(
+            a,
+            b,
+            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
+        );
+        let b1: uint8x16_t = simd_shuffle!(
+            a,
+            b,
+            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
+        );
+        let mut ret_val: uint8x16x2_t = transmute((a1, b1));
+        ret_val.0 = simd_shuffle!(
+            ret_val.0,
+            ret_val.0,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        );
+        ret_val.1 = simd_shuffle!(
+            ret_val.1,
+            ret_val.1,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        );
+        ret_val
+    }
 }
-#[doc = "Subtract"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sub)
-)]
-#[cfg_attr(
-    not(target_arch = "arm"),
-    stable(feature = "neon_intrinsics", since = "1.59.0")
-)]
-#[cfg_attr(
-    target_arch = "arm",
-    unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
+    assert_instr(trn1)
 )]
-pub fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
-    unsafe { simd_sub(a, b) }
-}
-#[doc = "Subtract"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)"]
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sub)
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65820,18 +67247,27 @@ pub fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
-    unsafe { simd_sub(a, b) }
+pub fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
+    unsafe {
+        let a1: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+        let b1: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
+        transmute((a1, b1))
+    }
 }
-#[doc = "Subtract"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sub)
+    assert_instr(trn1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65841,18 +67277,32 @@ pub fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
-    unsafe { simd_sub(a, b) }
+pub fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
+    unsafe {
+        let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let a1: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+        let b1: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
+        let mut ret_val: uint16x4x2_t = transmute((a1, b1));
+        ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]);
+        ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]);
+        ret_val
+    }
 }
-#[doc = "Subtract"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sub)
+    assert_instr(trn1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65862,18 +67312,27 @@ pub fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
-    unsafe { simd_sub(a, b) }
+pub fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
+    unsafe {
+        let a1: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
+        let b1: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
+        transmute((a1, b1))
+    }
 }
-#[doc = "Subtract"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
     all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
-    assert_instr(sub)
+    assert_instr(trn1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65883,22 +67342,32 @@ pub fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
-    unsafe { simd_sub(a, b) }
+pub fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
+    unsafe {
+        let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let a1: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
+        let b1: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
+        let mut ret_val: uint16x8x2_t = transmute((a1, b1));
+        ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]);
+        ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]);
+        ret_val
+    }
 }
-#[doc = "Subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
-    all(
-        test,
-        any(target_arch = "aarch64", target_arch = "arm64ec"),
-        target_endian = "little"
-    ),
-    assert_instr(subhn2)
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65908,23 +67377,27 @@ pub fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
-    let d = vsubhn_s16(b, c);
-    vcombine_s8(a, d)
+pub fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
+    unsafe {
+        let a1: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+        let b1: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
+        transmute((a1, b1))
+    }
 }
-#[doc = "Subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
-    all(
-        test,
-        any(target_arch = "aarch64", target_arch = "arm64ec"),
-        target_endian = "little"
-    ),
-    assert_instr(subhn2)
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65934,23 +67407,32 @@ pub fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
-    let d = vsubhn_s32(b, c);
-    vcombine_s16(a, d)
+pub fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
+    unsafe {
+        let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
+        let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]);
+        let a1: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+        let b1: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
+        let mut ret_val: uint32x4x2_t = transmute((a1, b1));
+        ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]);
+        ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]);
+        ret_val
+    }
 }
-#[doc = "Subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
-    all(
-        test,
-        any(target_arch = "aarch64", target_arch = "arm64ec"),
-        target_endian = "little"
-    ),
-    assert_instr(subhn2)
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65960,23 +67442,27 @@ pub fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
-    let d = vsubhn_s64(b, c);
-    vcombine_s32(a, d)
+pub fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
+    unsafe {
+        let a1: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
+        let b1: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
+        transmute((a1, b1))
+    }
 }
-#[doc = "Subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
-    all(
-        test,
-        any(target_arch = "aarch64", target_arch = "arm64ec"),
-        target_endian = "little"
-    ),
-    assert_instr(subhn2)
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -65986,23 +67472,32 @@ pub fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
-    let d = vsubhn_u16(b, c);
-    vcombine_u8(a, d)
+pub fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
+    unsafe {
+        let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]);
+        let a1: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
+        let b1: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
+        let mut ret_val: poly8x8x2_t = transmute((a1, b1));
+        ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]);
+        ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]);
+        ret_val
+    }
 }
-#[doc = "Subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)"]
 #[inline]
+#[cfg(target_endian = "little")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
-    all(
-        test,
-        any(target_arch = "aarch64", target_arch = "arm64ec"),
-        target_endian = "little"
-    ),
-    assert_instr(subhn2)
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
    assert_instr(trn1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -66012,23 +67507,35 @@ pub fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
-    let d = vsubhn_u32(b, c);
-    vcombine_u16(a, d)
+pub fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
+    unsafe {
+        let a1: poly8x16_t = simd_shuffle!(
+            a,
+            b,
+            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
+        );
+        let b1: poly8x16_t = simd_shuffle!(
+            a,
+            b,
+            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
+        );
+        transmute((a1, b1))
+    }
 }
-#[doc = "Subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)"]
+#[doc = "Transpose elements"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)"]
 #[inline]
+#[cfg(target_endian = "big")]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
 #[cfg_attr(
-    all(
-        test,
-        any(target_arch = "aarch64", target_arch = "arm64ec"),
-        target_endian = "little"
-    ),
-    assert_instr(subhn2)
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn1)
+)]
+#[cfg_attr(
+    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
+    assert_instr(trn2)
 )]
 #[cfg_attr(
     not(target_arch = "arm"),
@@ -66038,19 +67545,50 @@ pub fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_
     target_arch = "arm",
     unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")
 )]
-pub fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
-    let d = vsubhn_u64(b, c);
-    vcombine_u32(a, d)
+pub fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
+    unsafe {
+        let a: poly8x16_t =
+            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let b: poly8x16_t =
+            simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        let a1: poly8x16_t = simd_shuffle!(
+            a,
+            b,
+            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
+        );
+        let b1: poly8x16_t = simd_shuffle!(
+            a,
+            b,
+            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
+        );
+        let mut ret_val: poly8x16x2_t = transmute((a1, b1));
+        ret_val.0 = simd_shuffle!(
+            ret_val.0,
+            ret_val.0,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        );
+        ret_val.1 = simd_shuffle!(
+            ret_val.1,
+            ret_val.1,
+            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+        );
+        ret_val
+    }
 }
-#[doc = "Subtract returning high narrow"]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -66060,19 +67598,27 @@ pub fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_ target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { - let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); - unsafe { simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } +pub fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + unsafe { + let a1: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + transmute((a1, b1)) + } } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -66082,19 +67628,32 @@ pub fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { - let c: i32x4 = i32x4::new(16, 16, 16, 16); - unsafe { simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } +pub fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: poly16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a1: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); + let b1: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); + let mut ret_val: poly16x4x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val + } } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -66104,19 +67663,27 @@ pub fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { - let c: i64x2 = i64x2::new(32, 32); - unsafe { simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } +pub fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + unsafe { + let a1: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + transmute((a1, b1)) + } } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)"] +#[doc = "Transpose elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(trn1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(trn2) )] #[cfg_attr( not(target_arch = "arm"), @@ -66126,19 +67693,27 @@ pub fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); - unsafe { simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } +pub fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a1: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); + let b1: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); + let mut ret_val: poly16x8x2_t = transmute((a1, b1)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val + } } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66148,19 +67723,22 @@ pub fn vsubhn_u16(a: 
uint16x8_t, b: uint16x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - let c: u32x4 = u32x4::new(16, 16, 16, 16); - unsafe { simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } +pub fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { + let c: int8x8_t = simd_and(a, b); + let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) + } } -#[doc = "Subtract returning high narrow"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(subhn) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66170,19 +67748,22 @@ pub fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - let c: u64x2 = u64x2::new(32, 32); - unsafe { simd_cast(simd_shr(simd_sub(a, b), transmute(c))) } +pub fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { + let c: int8x16_t = simd_and(a, b); + let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) + } } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubl) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66192,22 +67773,22 @@ pub fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { +pub fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { unsafe { - let c: int16x8_t = simd_cast(a); - let d: int16x8_t = simd_cast(b); - simd_sub(c, d) + let c: int16x4_t = simd_and(a, b); + let d: i16x4 = i16x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubl) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66217,22 +67798,22 @@ pub fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { +pub fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { unsafe { - let c: int32x4_t = simd_cast(a); - let d: int32x4_t = simd_cast(b); - simd_sub(c, d) + let c: int16x8_t = simd_and(a, b); + let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } } -#[doc = "Signed Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubl) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66242,22 +67823,22 @@ pub fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { +pub fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { unsafe { - let c: int64x2_t = simd_cast(a); - let d: int64x2_t = simd_cast(b); - simd_sub(c, d) + let c: int32x2_t = simd_and(a, b); + let d: i32x2 = i32x2::new(0, 0); + simd_ne(c, transmute(d)) } } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubl) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66267,22 +67848,22 @@ pub fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { +pub fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { unsafe { - let c: uint16x8_t = simd_cast(a); - let d: uint16x8_t = simd_cast(b); - simd_sub(c, d) + let c: int32x4_t = simd_and(a, b); + let d: i32x4 = i32x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubl) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66292,22 +67873,22 @@ pub fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { +pub fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { unsafe { - let c: uint32x4_t = simd_cast(a); - let d: uint32x4_t = simd_cast(b); - simd_sub(c, d) + let c: poly8x8_t = simd_and(a, b); + let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } } -#[doc = "Unsigned Subtract Long"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubl) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66317,22 +67898,22 @@ pub fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { +pub fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { unsafe { - let c: uint64x2_t = simd_cast(a); - let d: uint64x2_t = simd_cast(b); - simd_sub(c, d) + let c: poly8x16_t = simd_and(a, b); + let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) } } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubw) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66342,18 +67923,22 @@ pub fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { - unsafe { simd_sub(a, simd_cast(b)) } +pub fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { + unsafe { + let c: poly16x4_t = simd_and(a, b); + let d: i16x4 = i16x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) + } } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)"] +#[doc = "Signed compare bitwise Test bits nonzero"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubw) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66363,18 +67948,22 @@ pub fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { - unsafe { simd_sub(a, simd_cast(b)) } +pub fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { + unsafe { + let c: poly16x8_t = simd_and(a, b); + let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) + } } -#[doc = "Signed Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(ssubw) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66384,18 +67973,22 @@ pub fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { - unsafe { simd_sub(a, simd_cast(b)) } +pub fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { + let c: uint8x8_t = simd_and(a, b); + let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) + } } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubw) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66405,18 +67998,22 @@ pub fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { - unsafe { simd_sub(a, simd_cast(b)) } +pub fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { + let c: uint8x16_t = simd_and(a, b); + let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) + } } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)"] +#[doc = "Unsigned compare 
bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubw) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66426,18 +68023,22 @@ pub fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { - unsafe { simd_sub(a, simd_cast(b)) } +pub fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { + let c: uint16x4_t = simd_and(a, b); + let d: u16x4 = u16x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) + } } -#[doc = "Unsigned Subtract Wide"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)"] #[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usubw) + assert_instr(cmtst) )] #[cfg_attr( not(target_arch = "arm"), @@ -66447,791 +68048,476 @@ pub fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { - unsafe { simd_sub(a, simd_cast(b)) } +pub fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { + let c: uint16x8_t = simd_and(a, b); + let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); + simd_ne(c, transmute(d)) + } } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(sudot, LANE = 0) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmtst) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsudot_lane_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x8_t) -> int32x2_t { - 
static_assert_uimm_bits!(LANE, 1); - let c = vreinterpret_u32_u8(c); - let c = vdup_lane_u32::<LANE>(c); - vusdot_s32(a, vreinterpret_u8_u32(c), b) +pub fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { + let c: uint32x2_t = simd_and(a, b); + let d: u32x2 = u32x2::new(0, 0); + simd_ne(c, transmute(d)) + } } -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)"] +#[doc = "Unsigned compare bitwise Test bits nonzero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(sudot, LANE = 0) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(cmtst) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vsudotq_lane_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x8_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c = vreinterpret_u32_u8(c); - let c = vdupq_lane_u32::<LANE>(c); - vusdotq_s32(a, vreinterpretq_u8_u32(c), b) -} -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 1))] -#[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(sudot, LANE = 3) -)] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub fn vsudot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: uint8x16_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c = vreinterpretq_u32_u8(c); - let c = vdup_laneq_u32::<LANE>(c); - vusdot_s32(a, vreinterpret_u8_u32(c), b) -} -#[doc = "Dot product index form with signed and unsigned integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 1))] -#[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(sudot, LANE = 3) -)] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub fn vsudotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: uint8x16_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c = vreinterpretq_u32_u8(c); - let c = vdupq_laneq_u32::<LANE>(c); - vusdotq_s32(a, vreinterpretq_u8_u32(c), b) -} -#[doc = 
"Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl1")] - fn _vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t; - } - unsafe { _vtbl1(a, b) } -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { - vtbl1(a, b) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - unsafe { transmute(vtbl1(transmute(a), transmute(b))) } -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { - unsafe { transmute(vtbl1(transmute(a), transmute(b))) } -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl2")] - fn _vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - } - unsafe { _vtbl2(a, b, c) } -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { - vtbl2(a.0, a.1, b) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { - unsafe { transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) } -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { - unsafe { transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) } -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbl3")] - fn _vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; - } - unsafe { _vtbl3(a, b, c, d) } -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { - vtbl3(a.0, a.1, a.2, b) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { - unsafe { - transmute(vtbl3( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(b), - )) - } -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { - unsafe { - transmute(vtbl3( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(b), - )) - } -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = 
"arm", link_name = "llvm.arm.neon.vtbl4")] - fn _vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; - } - unsafe { _vtbl4(a, b, c, d, e) } -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { - vtbl4(a.0, a.1, a.2, a.3, b) -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { - unsafe { - transmute(vtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - transmute(b), - )) - } -} -#[doc = "Table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] -#[inline] -#[target_feature(enable = "neon")] -#[cfg(target_arch = "arm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbl))] -pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { - unsafe { - transmute(vtbl4( - transmute(a.0), - transmute(a.1), - transmute(a.2), - transmute(a.3), - transmute(b), - )) - } -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1)"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx1")] - fn _vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - } - unsafe { _vtbx1(a, b, c) } -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { - vtbx1(a, b, c) -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - unsafe { transmute(vtbx1(transmute(a), transmute(b), transmute(c))) } -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = 
"arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { - unsafe { transmute(vtbx1(transmute(a), transmute(b), transmute(c))) } -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2)"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx2")] - fn _vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t; - } - unsafe { _vtbx2(a, b, c, d) } -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { - vtbx2(a, b.0, b.1, c) -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { - unsafe { - transmute(vtbx2( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(c), - )) - } -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { +pub fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { unsafe { - transmute(vtbx2( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(c), - )) - } -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3)"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx3")] - fn _vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int8x8_t; + let c: uint32x4_t = simd_and(a, b); + let d: u32x4 = u32x4::new(0, 0, 0, 0); + simd_ne(c, transmute(d)) } - unsafe { _vtbx3(a, b, c, d, e) } } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)"] #[inline] 
-#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { - vtbx3(a, b.0, b.1, b.2, c) +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usdot, LANE = 0) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vusdot_lane_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 1); + let c = vreinterpret_s32_s8(c); + let c = vdup_lane_s32::<LANE>(c); + vusdot_s32(a, b, vreinterpret_s8_s32(c)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { - unsafe { - transmute(vtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(c), - )) - } +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usdot, LANE = 0) +)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vusdotq_lane_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: int8x8_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 1); + let c = vreinterpret_s32_s8(c); + let c = vdupq_lane_s32::<LANE>(c); + vusdotq_s32(a, b, vreinterpretq_s8_s32(c)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { - unsafe { - transmute(vtbx3( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(c), - )) - } +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + 
assert_instr(usdot, LANE = 3) +)] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub fn vusdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t { + static_assert_uimm_bits!(LANE, 2); + let c = vreinterpretq_s32_s8(c); + let c = vdup_laneq_s32::<LANE>(c); + vusdot_s32(a, b, vreinterpret_s8_s32(c)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4)"] +#[doc = "Dot product index form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -fn vtbx4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t, f: int8x8_t) -> int8x8_t { - unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vtbx4")] - fn _vtbx4( - a: int8x8_t, - b: int8x8_t, - c: int8x8_t, - d: int8x8_t, - e: int8x8_t, - f: int8x8_t, - ) -> int8x8_t; - } - unsafe { _vtbx4(a, b, c, d, e, f) } +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 3))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(usdot, LANE = 3) +)] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] +pub fn vusdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { + static_assert_uimm_bits!(LANE, 2); + let c = vreinterpretq_s32_s8(c); + let c = vdupq_laneq_s32::<LANE>(c); + vusdotq_s32(a, b, vreinterpretq_s8_s32(c)) } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] +#[doc = "Dot product vector form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)"] #[inline] #[cfg(target_endian = "little")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { - unsafe { - vtbx4( - a, - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - ) +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] +#[cfg_attr( + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(usdot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v2i32.v8i8")] + fn _vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t; } + unsafe { 
_vusdot_s32(a, b, c) } } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] +#[doc = "Dot product vector form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)"] #[inline] #[cfg(target_endian = "big")] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { - let mut b: int8x8x4_t = b; - unsafe { - let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); - b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); - b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); - b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); - b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); - let c: int8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); - let ret_val: int8x8_t = vtbx4( - a, - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - c, - ); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] +#[cfg_attr( + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(usdot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v2i32.v8i8")] + fn _vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t; } -} -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] -#[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { unsafe { - transmute(vtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - transmute(c), - )) + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x2_t = _vusdot_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [1, 0]) } } -#[doc = "Extended table look-up"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] +#[doc = "Dot product vector form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] #[inline] -#[target_feature(enable = "neon,v7")] -#[cfg(target_arch = "arm")] -#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] -#[cfg_attr(test, assert_instr(vtbx))] -pub fn vtbx4_p8(a: poly8x8_t, b: 
poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { - unsafe { - transmute(vtbx4( - transmute(a), - transmute(b.0), - transmute(b.1), - transmute(b.2), - transmute(b.3), - transmute(c), - )) +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] +#[cfg_attr( + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(usdot) +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v4i32.v16i8")] + fn _vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t; } + unsafe { _vusdotq_s32(a, b, c) } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f16)"] +#[doc = "Dot product vector form with unsigned and signed integers"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) -)] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] #[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + all( + test, + any(target_arch = "aarch64", target_arch = "arm64ec"), + target_endian = "little" + ), + assert_instr(usdot) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vtrn_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { +pub fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v4i32.v16i8")] + fn _vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t; + } unsafe { - let a1: float16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: float16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let ret_val: int32x4_t = _vusdotq_s32(a, b, c); + simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } } -#[doc = "Transpose elements"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f16)"] +#[doc = "Unsigned and signed 8-bit integer matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusmmlaq_s32)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) -)] +#[target_feature(enable = "neon,i8mm")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(usmmla) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + unstable(feature = "stdarch_neon_i8mm", issue = "117223") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vtrnq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { - unsafe { - let a1: float16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: float16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) +pub fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + any(target_arch = "aarch64", target_arch = "arm64ec"), + link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8" + )] + #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usmmla.v4i32.v16i8")] + fn _vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t; } + unsafe { _vusmmlaq_s32(a, b, c) } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip1) + assert_instr(uzp1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip2) + assert_instr(uzp2) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vuzp_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { unsafe { - let a1: float32x2_t = simd_shuffle!(a, b, [0, 2]); - let b1: float32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a1, b1)) + let a0: float16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: float16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)"] 
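The `vuzp` bodies added below de-interleave with two shuffles: `uzp1` collects the even-indexed lanes of the concatenated inputs and `uzp2` the odd-indexed ones. A scalar sketch of that index pattern on four lanes (illustrative only; `uzp4` is not a stdarch name):

```rust
// Shuffle indices [0, 2, 4, 6] and [1, 3, 5, 7] address a ++ b.
fn uzp4(a: [u16; 4], b: [u16; 4]) -> ([u16; 4], [u16; 4]) {
    ([a[0], a[2], b[0], b[2]], [a[1], a[3], b[1], b[3]])
}

fn main() {
    let (evens, odds) = uzp4([0, 1, 2, 3], [4, 5, 6, 7]);
    assert_eq!(evens, [0, 2, 4, 6]);
    assert_eq!(odds, [1, 3, 5, 7]);
}
```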
+#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip1) + assert_instr(uzp1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip2) + assert_instr(uzp2) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vuzp_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { unsafe { - let a1: int32x2_t = simd_shuffle!(a, b, [0, 2]); - let b1: int32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a1, b1)) + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: float16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: float16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: float16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip1) + assert_instr(uzp1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(zip2) + assert_instr(uzp2) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vuzpq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { unsafe { - let a1: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - let b1: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a1, b1)) + let a0: float16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: float16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(uzp1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(uzp2) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vuzpq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { unsafe { - let a1: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: float16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: float16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: float16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67241,26 +68527,27 @@ pub fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { +pub fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { unsafe { - let a1: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) + let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a0, b0)) } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67270,34 +68557,32 @@ pub fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { +pub fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { unsafe { - let a1: int8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - let b1: int8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - transmute((a1, b1)) + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: float32x2x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67307,26 +68592,27 @@ pub fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { +pub fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { unsafe { - let a1: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) + let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a0, b0)) } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67336,26 +68622,32 @@ pub fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { +pub fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { unsafe { - let a1: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int32x2_t = simd_shuffle!(b, b, [1, 0]); + let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: int32x2x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67365,26 +68657,27 @@ pub fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { +pub fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { unsafe { - let a1: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) + let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + transmute((a0, b0)) } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67394,26 +68687,32 @@ pub fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { +pub fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { unsafe { - let a1: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: uint32x2_t = 
simd_shuffle!(a, b, [1, 3]); + let mut ret_val: uint32x2x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(uzp1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67423,34 +68722,27 @@ pub fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { +pub fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { unsafe { - let a1: uint8x16_t = simd_shuffle!( - a, - b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] - ); - let b1: uint8x16_t = simd_shuffle!( - a, - b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] - ); - transmute((a1, b1)) + let a0: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(uzp1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67460,26 +68752,32 @@ pub fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { +pub fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { unsafe { - let a1: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: float32x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } -#[doc = "Transpose elements"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(uzp1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67489,26 +68787,27 @@ pub fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { +pub fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { unsafe { - let a1: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) + let a0: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(uzp1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67518,26 +68817,32 @@ pub fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { +pub fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { unsafe { - let a1: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: int8x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)"] #[inline] +#[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(uzp1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67547,26 +68852,35 @@ pub fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { +pub fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { unsafe { - let a1: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) + let a0: int8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: int8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + transmute((a0, b0)) } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(uzp1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67576,34 +68890,50 @@ pub fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { +pub fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { unsafe { - let a1: poly8x16_t = simd_shuffle!( + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let a0: int8x16_t = simd_shuffle!( a, b, - [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30] + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] ); - let b1: poly8x16_t = simd_shuffle!( + let b0: int8x16_t = simd_shuffle!( a, b, - [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31] + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] ); - transmute((a1, b1)) + let mut ret_val: int8x16x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(uzp1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67613,26 +68943,27 @@ pub fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { +pub fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { unsafe { - let a1: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); - let b1: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); - transmute((a1, b1)) + let a0: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } } -#[doc = "Transpose elements"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn1) + assert_instr(uzp1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(trn2) + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67642,22 +68973,32 @@ pub fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { +pub fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { unsafe { - let a1: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); - let b1: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); - transmute((a1, b1)) + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: int16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67667,22 +69008,27 @@ pub fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { +pub fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { unsafe { - let c: int8x8_t = simd_and(a, b); - let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a0: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67692,22 +69038,32 @@ pub fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { +pub fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { unsafe { - let c: int8x16_t = simd_and(a, b); - let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: int16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67717,22 +69073,27 @@ pub fn 
vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { +pub fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { unsafe { - let c: int16x4_t = simd_and(a, b); - let d: i16x4 = i16x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a0: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67742,22 +69103,32 @@ pub fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { +pub fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { unsafe { - let c: int16x8_t = simd_and(a, b); - let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: int32x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67767,22 +69138,27 @@ pub fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { +pub fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { unsafe { - let c: int32x2_t = simd_and(a, b); - let d: i32x2 = i32x2::new(0, 0); - simd_ne(c, transmute(d)) + let a0: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: 
uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67792,22 +69168,32 @@ pub fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { +pub fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { unsafe { - let c: int32x4_t = simd_and(a, b); - let d: i32x4 = i32x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: uint8x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67817,22 +69203,35 @@ pub fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { +pub fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { unsafe { - let c: poly8x8_t = simd_and(a, b); - let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a0: uint8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: uint8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + transmute((a0, b0)) } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)"] +#[doc = "Unzip 
vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67842,22 +69241,50 @@ pub fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { +pub fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { unsafe { - let c: poly8x16_t = simd_and(a, b); - let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let a0: uint8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: uint8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + let mut ret_val: uint8x16x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67867,22 +69294,27 @@ pub fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { +pub fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { unsafe { - let c: poly16x4_t = simd_and(a, b); - let d: i16x4 = i16x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a0: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } } -#[doc = "Signed compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67892,22 +69324,32 @@ pub fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { +pub fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { unsafe { - let c: poly16x8_t = simd_and(a, b); - let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: uint16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp1) +)] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67917,22 +69359,27 @@ pub fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { +pub fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { unsafe { - let c: uint8x8_t = simd_and(a, b); - let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a0: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, 
any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67942,22 +69389,32 @@ pub fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { +pub fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { unsafe { - let c: uint8x16_t = simd_and(a, b); - let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: uint16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67967,22 +69424,27 @@ pub fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { +pub fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { unsafe { - let c: uint16x4_t = simd_and(a, b); - let d: u16x4 = u16x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a0: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -67992,22 +69454,32 @@ pub fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { 
target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { +pub fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { unsafe { - let c: uint16x8_t = simd_and(a, b); - let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: uint32x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68017,22 +69489,27 @@ pub fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { +pub fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { unsafe { - let c: uint32x2_t = simd_and(a, b); - let d: u32x2 = u32x2::new(0, 0); - simd_ne(c, transmute(d)) + let a0: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } } -#[doc = "Unsigned compare bitwise Test bits nonzero"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(cmtst) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68042,197 +69519,209 @@ pub fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { +pub fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { unsafe { - let c: uint32x4_t = simd_and(a, b); - let d: u32x4 = u32x4::new(0, 0, 0, 0); - simd_ne(c, transmute(d)) + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: 
poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: poly8x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot, LANE = 0) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vusdot_lane_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 1); - let c = vreinterpret_s32_s8(c); - let c = vdup_lane_s32::<LANE>(c); - vusdot_s32(a, b, vreinterpret_s8_s32(c)) +pub fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + unsafe { + let a0: poly8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: poly8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + transmute((a0, b0)) + } } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot, LANE = 0) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] -#[rustc_legacy_const_generics(3)] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vusdotq_lane_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: 
int8x8_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 1); - let c = vreinterpret_s32_s8(c); - let c = vdupq_lane_s32::<LANE>(c); - vusdotq_s32(a, b, vreinterpretq_s8_s32(c)) +pub fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let a0: poly8x16_t = simd_shuffle!( + a, + b, + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] + ); + let b0: poly8x16_t = simd_shuffle!( + a, + b, + [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] + ); + let mut ret_val: poly8x16x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val + } } -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)"] #[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 3))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot, LANE = 3) + assert_instr(uzp1) )] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub fn vusdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t { - static_assert_uimm_bits!(LANE, 2); - let c = vreinterpretq_s32_s8(c); - let c = vdup_laneq_s32::<LANE>(c); - vusdot_s32(a, b, vreinterpret_s8_s32(c)) -} -#[doc = "Dot product index form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"] -#[inline] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 3))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usdot, LANE = 3) -)] -#[rustc_legacy_const_generics(3)] -#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] -pub fn vusdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - static_assert_uimm_bits!(LANE, 2); - let c = vreinterpretq_s32_s8(c); - let c = vdupq_laneq_s32::<LANE>(c); - vusdotq_s32(a, b, vreinterpretq_s8_s32(c)) -} -#[doc = "Dot product vector form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)"] -#[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] -#[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(usdot) + assert_instr(uzp2) )] #[cfg_attr( 
not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usdot.v2i32.v8i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v2i32.v8i8")] - fn _vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t; +pub fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + unsafe { + let a0: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + transmute((a0, b0)) } - unsafe { _vusdot_s32(a, b, c) } } -#[doc = "Dot product vector form with unsigned and signed integers"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( - all( - test, - any(target_arch = "aarch64", target_arch = "arm64ec"), - target_endian = "little" - ), - assert_instr(usdot) + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usdot.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usdot.v4i32.v16i8")] - fn _vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t; +pub fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: poly16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); + let b0: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let mut ret_val: poly16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } - unsafe { _vusdotq_s32(a, b, c) } } -#[doc = "Unsigned and signed 8-bit integer matrix multiply-accumulate"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusmmlaq_s32)"] +#[doc = "Unzip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)"] #[inline] -#[target_feature(enable = "neon,i8mm")] -#[cfg_attr(target_arch = "arm", target_feature(enable 
= "v8"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(usmmla) + assert_instr(uzp1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(uzp2) )] #[cfg_attr( not(target_arch = "arm"), - unstable(feature = "stdarch_neon_i8mm", issue = "117223") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { - unsafe extern "unadjusted" { - #[cfg_attr( - any(target_arch = "aarch64", target_arch = "arm64ec"), - link_name = "llvm.aarch64.neon.usmmla.v4i32.v16i8" - )] - #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.usmmla.v4i32.v16i8")] - fn _vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t; +pub fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + unsafe { + let a0: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + transmute((a0, b0)) } - unsafe { _vusmmlaq_s32(a, b, c) } } #[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( @@ -68243,35 +69732,39 @@ pub fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(uzp2) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vuzp_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { +pub fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { unsafe { - let a0: float16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: float16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); + let b0: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let mut ret_val: poly16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f16)"] #[inline] +#[cfg(target_endian = "little")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vzip.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[target_feature(enable = "neon,fp16")] #[cfg_attr( @@ -68283,19 +69776,19 @@ pub fn vuzp_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] #[cfg(not(target_arch = "arm64ec"))] -pub fn vuzpq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { +pub fn vzip_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { unsafe { - let a0: float16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: float16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let a0: float16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: float16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vzip.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -68304,27 +69797,34 @@ pub fn vuzpq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip2) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vzip_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { unsafe { - let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a0, b0)) + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: float16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: float16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: float16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vzip.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -68333,27 +69833,29 @@ pub fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip2) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vzipq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { unsafe { - let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); + let a0: float16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: float16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f16)"] #[inline] -#[target_feature(enable = "neon")] +#[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vzip.16"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -68362,34 +69864,42 @@ pub fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip2) )] +#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") + stable(feature = "stdarch_neon_fp16", since = "1.94.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { +#[cfg(not(target_arch = "arm64ec"))] +pub fn vzipq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { unsafe { - let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a0, b0)) + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: float16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: float16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: float16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: float16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] 
#[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68399,26 +69909,27 @@ pub fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { +pub fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { unsafe { - let a0: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68428,26 +69939,32 @@ pub fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { +pub fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { unsafe { - let a0: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - transmute((a0, b0)) + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: float32x2_t = simd_shuffle!(b, b, [1, 0]); + let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: float32x2x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68457,34 +69974,27 @@ pub fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") )] -pub fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { +pub fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { unsafe { - let a0: int8x16_t = simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - let b0: int8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); + let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68494,26 +70004,32 @@ pub fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { +pub fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { unsafe { - let a0: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int32x2_t = simd_shuffle!(b, b, [1, 0]); + let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: int32x2x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68523,26 +70039,27 @@ pub fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { +pub fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { unsafe { - let a0: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: uint32x2_t = 
simd_shuffle!(a, b, [1, 3]); transmute((a0, b0)) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68552,26 +70069,32 @@ pub fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { +pub fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { unsafe { - let a0: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint32x2_t = simd_shuffle!(b, b, [1, 0]); + let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); + let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); + let mut ret_val: uint32x2x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68581,26 +70104,27 @@ pub fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { +pub fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { unsafe { - let a0: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let a0: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68610,34 +70134,32 @@ pub fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { +pub fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { unsafe { - let a0: uint8x16_t = simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - let b0: uint8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); - transmute((a0, b0)) + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: int8x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68647,26 +70169,27 @@ pub fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { +pub fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { unsafe { - let a0: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let a0: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - 
assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68676,26 +70199,32 @@ pub fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { +pub fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { unsafe { - let a0: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - transmute((a0, b0)) + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: int16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68705,26 +70234,27 @@ pub fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { +pub fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { unsafe { - let a0: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); + let a0: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68734,26 +70264,32 @@ pub fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") 
)] -pub fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { +pub fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { unsafe { - let a0: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); - transmute((a0, b0)) + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: uint8x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68763,34 +70299,27 @@ pub fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { +pub fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { unsafe { - let a0: poly8x16_t = simd_shuffle!( - a, - b, - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30] - ); - let b0: poly8x16_t = simd_shuffle!( - a, - b, - [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31] - ); + let a0: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68800,26 +70329,32 @@ pub fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { +pub fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { unsafe { - let a0: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); - let b0: poly16x4_t = 
simd_shuffle!(a, b, [1, 3, 5, 7]); - transmute((a0, b0)) + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: uint16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } -#[doc = "Unzip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)"] +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp1) + assert_instr(zip1) )] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(uzp2) + assert_instr(zip2) )] #[cfg_attr( not(target_arch = "arm"), @@ -68829,18 +70364,20 @@ pub fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { +pub fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { unsafe { - let a0: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); - let b0: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); + let a0: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)"] #[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vzip.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -68849,28 +70386,33 @@ pub fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip2) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vzip_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { +pub fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { unsafe { - let a0: float16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: float16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - transmute((a0, b0)) + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 
2, 10, 3, 11]); + let b0: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: poly8x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)"] #[inline] +#[cfg(target_endian = "little")] +#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vzip.16"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -68879,29 +70421,28 @@ pub fn vzip_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip2) )] -#[target_feature(enable = "neon,fp16")] #[cfg_attr( not(target_arch = "arm"), - stable(feature = "stdarch_neon_fp16", since = "1.94.0") + stable(feature = "neon_intrinsics", since = "1.59.0") )] #[cfg_attr( target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -#[cfg(not(target_arch = "arm64ec"))] -pub fn vzipq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { +pub fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { unsafe { - let a0: float16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: float16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let a0: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -68918,19 +70459,25 @@ pub fn vzipq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { +pub fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { unsafe { - let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a0, b0)) + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: poly16x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: poly16x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -68947,19 +70494,20 @@ pub fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { +pub fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { unsafe { - let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); + let a0: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -68976,19 +70524,25 @@ pub fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { +pub fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { unsafe { - let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); - let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); - transmute((a0, b0)) + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: float32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: float32x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -69005,19 +70559,28 @@ pub fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { +pub fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { unsafe { - let a0: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: int8x8_t = 
simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let a0: int8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + let b0: int8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); transmute((a0, b0)) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -69034,19 +70597,43 @@ pub fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { +pub fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { unsafe { - let a0: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - transmute((a0, b0)) + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let a0: int8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + let b0: int8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); + let mut ret_val: int8x16x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -69063,19 +70650,20 @@ pub fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { +pub fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { unsafe { - let a0: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let a0: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -69092,19 +70680,25 @@ pub fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { +pub fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { unsafe { - let a0: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - transmute((a0, b0)) + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: int16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: int16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -69121,19 +70715,20 @@ pub fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { +pub fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { unsafe { - let a0: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let a0: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), assert_instr(zip1) @@ -69150,16 +70745,22 @@ pub fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { +pub fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { 
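+    // [Editorial note, hedged.] Every `target_endian = "big"` body added in
+    // this patch follows the same wrapper pattern: lane-reverse each input,
+    // apply the little-endian shuffle indices unchanged, then lane-reverse
+    // both halves of the result, so callers see identical per-lane semantics
+    // on either endianness. For the 4-lane case below: [a3, a2, a1, a0] is
+    // reversed to [a0, a1, a2, a3], zipped with the reversed `b`, and
+    // `ret_val.0` / `ret_val.1` are reversed back before returning.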
unsafe { - let a0: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - transmute((a0, b0)) + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: int32x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -69179,16 +70780,25 @@ pub fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { +pub fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { unsafe { - let a0: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let a0: uint8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + let b0: uint8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); transmute((a0, b0)) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -69208,24 +70818,40 @@ pub fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { +pub fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { unsafe { - let a0: int8x16_t = simd_shuffle!( + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let a0: uint8x16_t = simd_shuffle!( a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] ); - let b0: int8x16_t = simd_shuffle!( + let b0: uint8x16_t = simd_shuffle!( a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] ); - transmute((a0, b0)) + let mut ret_val: uint8x16x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -69245,16 +70871,17 @@ pub fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { +pub fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { unsafe { - let a0: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let a0: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); transmute((a0, b0)) } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -69274,16 +70901,22 @@ pub fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { +pub fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { unsafe { - let a0: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); - transmute((a0, b0)) + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: uint16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -69303,24 +70936,17 @@ pub fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { +pub fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { unsafe { - let a0: uint8x16_t = simd_shuffle!( - a, - b, - [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] - ); - let b0: uint8x16_t = simd_shuffle!( - a, - b, - [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] - ); + let a0: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); transmute((a0, b0)) } } #[doc = "Zip vectors"] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -69340,16 +70966,22 @@ pub fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { +pub fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { unsafe { - let a0: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); - let b0: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); - transmute((a0, b0)) + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); + let a0: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); + let b0: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let mut ret_val: uint32x4x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val } } #[doc = "Zip vectors"] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p8)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -69369,16 +71001,25 @@ pub fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] -pub fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { +pub fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { unsafe { - let a0: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); - let b0: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); + let a0: poly8x16_t = simd_shuffle!( + a, + b, + [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23] + ); + let b0: poly8x16_t = simd_shuffle!( + a, + b, + [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] + ); transmute((a0, b0)) } } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p8)"] #[inline] +#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -69400,6 +71041,10 @@ pub fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { )] pub fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let a0: poly8x16_t = simd_shuffle!( a, b, @@ -69410,12 +71055,24 @@ pub fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31] ); - transmute((a0, b0)) + let mut ret_val: poly8x16x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val } } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p16)"] #[inline] +#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -69442,3 +71099,38 @@ pub fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { transmute((a0, b0)) } } +#[doc = "Zip vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p16)"] +#[inline] +#[cfg(target_endian = "big")] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip1) +)] +#[cfg_attr( + all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), + assert_instr(zip2) +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +pub fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let a0: poly16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); + let b0: poly16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); + let mut ret_val: poly16x8x2_t = transmute((a0, b0)); + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val + } +} diff --git a/crates/intrinsic-test/missing_x86.txt b/crates/intrinsic-test/missing_x86.txt index f88a125bfd..a02f6b68e3 100644 --- a/crates/intrinsic-test/missing_x86.txt +++ b/crates/intrinsic-test/missing_x86.txt @@ -45,15 +45,23 @@ _mm_set1_pch _tpause _umwait -# IMM8 must be an even number in the range `0..=62` -_mm_sm3rnds2_epi32 - # SDE ERROR: Cannot execute XGETBV with ECX != 0 _xgetbv # top bits are undefined, unclear how to test these +_mm256_castph128_ph256 +_mm256_castps128_ps256 +_mm256_castpd128_pd256 _mm256_castsi128_si256 + +_mm512_castph128_ph512 +_mm512_castps128_ps512 +_mm512_castpd128_pd512 _mm512_castsi128_si512 + +_mm512_castph256_ph512 +_mm512_castps256_ps512 +_mm512_castpd256_pd512 _mm512_castsi256_si512 # Clang bug @@ -65,3 +73,15 @@ _mm512_mask_reduce_min_pd _mm512_mask_reduce_min_ps _mm_extract_epi16 _mm_extract_epi8 + +# TODO: fix +_mm_movemask_epi8 +_mm_movemask_pd + +# Rounding errors in release mode +_mm_maskz_fmadd_sd +_mm_maskz_fmadd_ss +_mm_maskz_fmsub_sd +_mm_maskz_fmsub_ss +_mm_maskz_fnmadd_sd +_mm_maskz_fnmadd_ss diff --git a/crates/intrinsic-test/src/arm/compile.rs b/crates/intrinsic-test/src/arm/compile.rs deleted file mode 100644 index a672da2cc0..0000000000 --- a/crates/intrinsic-test/src/arm/compile.rs +++ /dev/null @@ -1,51 +0,0 @@ -use crate::common::cli::ProcessedCli; -use crate::common::compile_c::{CompilationCommandBuilder, CppCompilation}; - -pub fn build_cpp_compilation(config: &ProcessedCli) -> Option { - let cpp_compiler = config.cpp_compiler.as_ref()?; - - // -ffp-contract=off emulates Rust's approach of 
not fusing separate mul-add operations - let mut command = CompilationCommandBuilder::new() - .add_arch_flags(["armv8.6-a", "crypto", "crc", "dotprod", "fp16"]) - .set_compiler(cpp_compiler) - .set_target(&config.target) - .set_opt_level("2") - .set_cxx_toolchain_dir(config.cxx_toolchain_dir.as_deref()) - .set_project_root("c_programs") - .add_extra_flags(["-ffp-contract=off", "-Wno-narrowing"]); - - if !config.target.contains("v7") { - command = command.add_arch_flags(["faminmax", "lut", "sha3", "fp8"]); - } - - if !cpp_compiler.contains("clang") { - command = command.add_extra_flag("-flax-vector-conversions"); - } - - let mut cpp_compiler = command.into_cpp_compilation(); - - if config.target.contains("aarch64_be") { - let Some(ref cxx_toolchain_dir) = config.cxx_toolchain_dir else { - panic!( - "target `{}` must specify `cxx_toolchain_dir`", - config.target - ) - }; - - cpp_compiler.command_mut().args([ - &format!("--sysroot={cxx_toolchain_dir}/aarch64_be-none-linux-gnu/libc"), - "--include-directory", - &format!("{cxx_toolchain_dir}/aarch64_be-none-linux-gnu/include/c++/14.3.1"), - "--include-directory", - &format!("{cxx_toolchain_dir}/aarch64_be-none-linux-gnu/include/c++/14.3.1/aarch64_be-none-linux-gnu"), - "-L", - &format!("{cxx_toolchain_dir}/lib/gcc/aarch64_be-none-linux-gnu/14.3.1"), - "-L", - &format!("{cxx_toolchain_dir}/aarch64_be-none-linux-gnu/libc/usr/lib"), - "-B", - &format!("{cxx_toolchain_dir}/lib/gcc/aarch64_be-none-linux-gnu/14.3.1"), - ]); - } - - Some(cpp_compiler) -} diff --git a/crates/intrinsic-test/src/arm/config.rs b/crates/intrinsic-test/src/arm/config.rs index 60bb0ca56c..9371db737e 100644 --- a/crates/intrinsic-test/src/arm/config.rs +++ b/crates/intrinsic-test/src/arm/config.rs @@ -3,51 +3,6 @@ pub const NOTICE: &str = "\ // test are derived from a JSON specification, published under the same license as the // `intrinsic-test` crate.\n"; -pub const PLATFORM_C_FORWARD_DECLARATIONS: &str = r#" -#ifdef __aarch64__ -std::ostream& operator<<(std::ostream& os, poly128_t value); -#endif - -std::ostream& operator<<(std::ostream& os, float16_t value); -std::ostream& operator<<(std::ostream& os, uint8_t value); - -// T1 is the `To` type, T2 is the `From` type -template T1 cast(T2 x) { - static_assert(sizeof(T1) == sizeof(T2), "sizeof T1 and T2 must be the same"); - T1 ret{}; - memcpy(&ret, &x, sizeof(T1)); - return ret; -} -"#; - -pub const PLATFORM_C_DEFINITIONS: &str = r#" -#ifdef __aarch64__ -std::ostream& operator<<(std::ostream& os, poly128_t value) { - std::stringstream temp; - do { - int n = value % 10; - value /= 10; - temp << n; - } while (value != 0); - std::string tempstr(temp.str()); - std::string res(tempstr.rbegin(), tempstr.rend()); - os << res; - return os; -} - -#endif - -std::ostream& operator<<(std::ostream& os, float16_t value) { - os << static_cast(value); - return os; -} - -std::ostream& operator<<(std::ostream& os, uint8_t value) { - os << (unsigned int) value; - return os; -} -"#; - pub const PLATFORM_RUST_DEFINITIONS: &str = ""; pub const PLATFORM_RUST_CFGS: &str = r#" diff --git a/crates/intrinsic-test/src/arm/mod.rs b/crates/intrinsic-test/src/arm/mod.rs index 99c8da854c..9bf6c95ffd 100644 --- a/crates/intrinsic-test/src/arm/mod.rs +++ b/crates/intrinsic-test/src/arm/mod.rs @@ -1,5 +1,4 @@ mod argument; -mod compile; mod config; mod intrinsic; mod json_parser; @@ -7,7 +6,6 @@ mod types; use crate::common::SupportedArchitectureTest; use crate::common::cli::ProcessedCli; -use crate::common::compile_c::CppCompilation; use 
crate::common::intrinsic::Intrinsic; use crate::common::intrinsic_helpers::TypeKind; use intrinsic::ArmIntrinsicType; @@ -15,16 +13,11 @@ use json_parser::get_neon_intrinsics; pub struct ArmArchitectureTest { intrinsics: Vec>, - cli_options: ProcessedCli, } impl SupportedArchitectureTest for ArmArchitectureTest { type IntrinsicImpl = ArmIntrinsicType; - fn cli_options(&self) -> &ProcessedCli { - &self.cli_options - } - fn intrinsics(&self) -> &[Intrinsic] { &self.intrinsics } @@ -32,18 +25,16 @@ impl SupportedArchitectureTest for ArmArchitectureTest { const NOTICE: &str = config::NOTICE; const PLATFORM_C_HEADERS: &[&str] = &["arm_neon.h", "arm_acle.h", "arm_fp16.h"]; - const PLATFORM_C_DEFINITIONS: &str = config::PLATFORM_C_DEFINITIONS; - const PLATFORM_C_FORWARD_DECLARATIONS: &str = config::PLATFORM_C_FORWARD_DECLARATIONS; const PLATFORM_RUST_DEFINITIONS: &str = config::PLATFORM_RUST_DEFINITIONS; const PLATFORM_RUST_CFGS: &str = config::PLATFORM_RUST_CFGS; - fn cpp_compilation(&self) -> Option { - compile::build_cpp_compilation(&self.cli_options) + fn arch_flags(&self) -> Vec<&str> { + vec!["-march=armv8.6a+crypto+crc+dotprod+fp16"] } fn create(cli_options: ProcessedCli) -> Self { - let a32 = cli_options.target.contains("v7"); + let a32 = cli_options.target.starts_with("armv7"); let mut intrinsics = get_neon_intrinsics(&cli_options.filename, &cli_options.target) .expect("Error parsing input file"); @@ -68,9 +59,6 @@ impl SupportedArchitectureTest for ArmArchitectureTest { .take(sample_size) .collect::>(); - Self { - intrinsics, - cli_options, - } + Self { intrinsics } } } diff --git a/crates/intrinsic-test/src/arm/types.rs b/crates/intrinsic-test/src/arm/types.rs index 18468bd558..e9614eba21 100644 --- a/crates/intrinsic-test/src/arm/types.rs +++ b/crates/intrinsic-test/src/arm/types.rs @@ -1,6 +1,4 @@ use super::intrinsic::ArmIntrinsicType; -use crate::common::cli::Language; -use crate::common::indentation::Indentation; use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition, Sign, TypeKind}; impl IntrinsicTypeDefinition for ArmIntrinsicType { @@ -8,8 +6,8 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { fn c_type(&self) -> String { let prefix = self.kind.c_prefix(); - if let (Some(bit_len), simd_len, vec_len) = (self.bit_len, self.simd_len, self.vec_len) { - match (simd_len, vec_len) { + if let Some(bit_len) = self.bit_len { + match (self.simd_len, self.vec_len) { (None, None) => format!("{prefix}{bit_len}_t"), (Some(simd), None) => format!("{prefix}{bit_len}x{simd}_t"), (Some(simd), Some(vec)) => format!("{prefix}{bit_len}x{simd}x{vec}_t"), @@ -20,19 +18,24 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { } } - fn c_single_vector_type(&self) -> String { - if let (Some(bit_len), Some(simd_len)) = (self.bit_len, self.simd_len) { - format!( - "{prefix}{bit_len}x{simd_len}_t", - prefix = self.kind.c_prefix() - ) + fn rust_type(&self) -> String { + let rust_prefix = self.kind.rust_prefix(); + let c_prefix = self.kind.c_prefix(); + + if let Some(bit_len) = self.bit_len { + match (self.simd_len, self.vec_len) { + (None, None) => format!("{rust_prefix}{bit_len}"), + (Some(simd), None) => format!("{c_prefix}{bit_len}x{simd}_t"), + (Some(simd), Some(vec)) => format!("{c_prefix}{bit_len}x{simd}x{vec}_t"), + (None, Some(_)) => todo!("{self:#?}"), // Likely an invalid case + } } else { - unreachable!("Shouldn't be called on this type") + todo!("{self:#?}") } } /// Determines the load function for this type. 
- fn get_load_function(&self, language: Language) -> String { + fn get_load_function(&self) -> String { if let IntrinsicType { kind: k, bit_len: Some(bl), @@ -47,16 +50,13 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { "" }; - let choose_workaround = language == Language::C && self.target.contains("v7"); format!( "vld{len}{quad}_{type}{size}", type = match k { TypeKind::Int(Sign::Unsigned) => "u", TypeKind::Int(Sign::Signed) => "s", TypeKind::Float => "f", - // The ACLE doesn't support 64-bit polynomial loads on Armv7 - // if armv7 and bl == 64, use "s", else "p" - TypeKind::Poly => if choose_workaround && *bl == 64 {"s"} else {"p"}, + TypeKind::Poly => "p", x => todo!("get_load_function TypeKind: {x:#?}"), }, size = bl, @@ -67,97 +67,6 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { todo!("get_load_function IntrinsicType: {self:#?}") } } - - /// Determines the get lane function for this type. - fn get_lane_function(&self) -> String { - if let IntrinsicType { - kind: k, - bit_len: Some(bl), - simd_len, - .. - } = &self.data - { - let quad = if (simd_len.unwrap_or(1) * bl) > 64 { - "q" - } else { - "" - }; - format!( - "vget{quad}_lane_{type}{size}", - type = match k { - TypeKind::Int(Sign::Unsigned) => "u", - TypeKind::Int(Sign::Signed) => "s", - TypeKind::Float => "f", - TypeKind::Poly => "p", - x => todo!("get_load_function TypeKind: {x:#?}"), - }, - size = bl, - quad = quad, - ) - } else { - todo!("get_lane_function IntrinsicType: {self:#?}") - } - } - - /// Generates a std::cout for the intrinsics results that will match the - /// rust debug output format for the return type. The generated line assumes - /// there is an int i in scope which is the current pass number. - fn print_result_c(&self, indentation: Indentation, additional: &str) -> String { - let lanes = if self.num_vectors() > 1 { - (0..self.num_vectors()) - .map(|vector| { - format!( - r#""{ty}(" << {lanes} << ")""#, - ty = self.c_single_vector_type(), - lanes = (0..self.num_lanes()) - .map(move |idx| -> std::string::String { - let lane_fn = self.get_lane_function(); - let final_cast = self.generate_final_type_cast(); - format!( - "{final_cast}{lane_fn}(__return_value.val[{vector}], {idx})" - ) - }) - .collect::>() - .join(r#" << ", " << "#) - ) - }) - .collect::>() - .join(r#" << ", " << "#) - } else if self.num_lanes() > 1 { - (0..self.num_lanes()) - .map(|idx| -> std::string::String { - let lane_fn = self.get_lane_function(); - let final_cast = self.generate_final_type_cast(); - format!("{final_cast}{lane_fn}(__return_value, {idx})") - }) - .collect::>() - .join(r#" << ", " << "#) - } else { - format!( - "{promote}cast<{cast}>(__return_value)", - cast = match self.kind() { - TypeKind::Float if self.inner_size() == 16 => "float16_t".to_string(), - TypeKind::Float if self.inner_size() == 32 => "float".to_string(), - TypeKind::Float if self.inner_size() == 64 => "double".to_string(), - TypeKind::Int(Sign::Signed) => format!("int{}_t", self.inner_size()), - TypeKind::Int(Sign::Unsigned) => format!("uint{}_t", self.inner_size()), - TypeKind::Poly => format!("poly{}_t", self.inner_size()), - ty => todo!("print_result_c - Unknown type: {ty:#?}"), - }, - promote = self.generate_final_type_cast(), - ) - }; - - format!( - r#"{indentation}std::cout << "Result {additional}-" << i+1 << ": {ty}" << std::fixed << std::setprecision(150) << {lanes} << "{close}" << std::endl;"#, - ty = if self.is_simd() { - format!("{}(", self.c_type()) - } else { - String::from("") - }, - close = if self.is_simd() { ")" } else { "" }, - ) - 
} } impl ArmIntrinsicType { diff --git a/crates/intrinsic-test/src/common/argument.rs b/crates/intrinsic-test/src/common/argument.rs index 8ae9869db0..04729cd714 100644 --- a/crates/intrinsic-test/src/common/argument.rs +++ b/crates/intrinsic-test/src/common/argument.rs @@ -1,4 +1,5 @@ -use super::cli::Language; +use itertools::Itertools; + use super::constraint::Constraint; use super::gen_rust::PASSES; use super::indentation::Indentation; @@ -50,32 +51,15 @@ where self.constraint.is_some() } - /// The binding keyword (e.g. "const" or "let") for the array of possible test inputs. - fn rust_vals_array_binding(&self) -> impl std::fmt::Display { - if self.ty.is_rust_vals_array_const() { - "const" - } else { - "let" - } - } - /// The name (e.g. "A_VALS" or "a_vals") for the array of possible test inputs. pub(crate) fn rust_vals_array_name(&self) -> impl std::fmt::Display { - if self.ty.is_rust_vals_array_const() { - let loads = crate::common::gen_rust::PASSES; - format!( - "{}_{ty}_{load_size}", - self.name.to_uppercase(), - ty = self.ty.rust_scalar_type(), - load_size = self.ty.num_lanes() * self.ty.num_vectors() + loads - 1, - ) - } else { - format!("{}_vals", self.name.to_lowercase()) - } - } - - fn as_call_param_c(&self) -> String { - self.ty.as_call_param_c(&self.generate_name()) + let loads = crate::common::gen_rust::PASSES; + format!( + "{}_{ty}_{load_size}", + self.name.to_uppercase(), + ty = self.ty.rust_scalar_type(), + load_size = self.ty.num_lanes() * self.ty.num_vectors() + loads - 1, + ) } } @@ -88,13 +72,40 @@ impl ArgumentList where T: IntrinsicTypeDefinition, { - /// Converts the argument list into the call parameters for a C function call. - /// e.g. this would generate something like `a, &b, c` - pub fn as_call_param_c(&self) -> String { + pub fn as_non_imm_arglist_c(&self) -> String { self.iter() - .map(|arg| arg.as_call_param_c()) - .collect::>() - .join(", ") + .filter(|arg| !arg.has_constraint()) + .format_with("", |arg, fmt| { + fmt(&format_args!(", const {}* {}", arg.to_c_type(), arg.name)) + }) + .to_string() + } + + pub fn as_non_imm_arglist_rust(&self) -> String { + self.iter() + .filter(|arg| !arg.has_constraint()) + .format_with("", |arg, fmt| { + fmt(&format_args!( + ", {}: *const {}", + arg.name, + arg.ty.rust_type() + )) + }) + .to_string() + } + + pub fn as_call_params_c(&self, imm_args: &[i64]) -> String { + let mut imm_args = imm_args.iter(); + self.iter() + .format_with(", ", |arg, fmt| { + if arg.has_constraint() { + fmt(&imm_args.next().unwrap()) + } else { + fmt(&"*")?; + fmt(&arg.name) + } + }) + .to_string() } /// Converts the argument list into the call parameters for a Rust function. @@ -103,52 +114,14 @@ where self.iter() .filter(|a| !a.has_constraint()) .map(|arg| arg.generate_name() + " as _") - .collect::>() .join(", ") } - /// Creates a line for each argument that initializes an array for C from which `loads` argument - /// values can be loaded as a sliding window. - /// e.g `const int32x2_t a_vals = {0x3effffff, 0x3effffff, 0x3f7fffff}`, if loads=2. - pub fn gen_arglists_c( - &self, - w: &mut impl std::io::Write, - indentation: Indentation, - loads: u32, - ) -> std::io::Result<()> { - for arg in self.iter().filter(|&arg| !arg.has_constraint()) { - // Setting the variables on an aligned boundary to make it easier to pick - // functions (of a specific architecture) that would help load the values. 
- writeln!( - w, - "{indentation}alignas(64) const {ty} {name}_vals[] = {values};", - ty = arg.ty.c_scalar_type(), - name = arg.generate_name(), - values = arg.ty.populate_random(indentation, loads, &Language::C) - )? - } - - Ok(()) - } - - /// Creates a line for each argument that initializes an array for Rust from which `loads` argument - /// values can be loaded as a sliding window, e.g `const A_VALS: [u32; 20] = [...];` - pub fn gen_arglists_rust( - &self, - w: &mut impl std::io::Write, - indentation: Indentation, - loads: u32, - ) -> std::io::Result<()> { - for arg in self.iter().filter(|&arg| !arg.has_constraint()) { - // Constants are defined globally. - if arg.ty.is_rust_vals_array_const() { - continue; - } - - Self::gen_arg_rust(arg, w, indentation, loads)?; - } - - Ok(()) + pub fn as_c_call_param_rust(&self) -> String { + self.iter() + .filter(|a| !a.has_constraint()) + .map(|arg| format!(", &raw const {} as _", arg.generate_name())) + .join("") } pub fn gen_arg_rust( @@ -159,39 +132,14 @@ where ) -> std::io::Result<()> { writeln!( w, - "{indentation}{bind} {name}: [{ty}; {load_size}] = {values};\n", - bind = arg.rust_vals_array_binding(), + "{indentation}static {name}: [{ty}; {load_size}] = {values};\n", name = arg.rust_vals_array_name(), ty = arg.ty.rust_scalar_type(), load_size = arg.ty.num_lanes() * arg.ty.num_vectors() + loads - 1, - values = arg.ty.populate_random(indentation, loads, &Language::Rust) + values = arg.ty.populate_random(indentation, loads) ) } - /// Creates a line for each argument that initializes the argument from an array `[arg]_vals` at - /// an offset `i` using a load intrinsic, in C. - /// e.g `uint8x8_t a = vld1_u8(&a_vals[i]);` - /// - /// ARM-specific - pub fn load_values_c(&self, indentation: Indentation) -> String { - self.iter() - .filter(|&arg| !arg.has_constraint()) - .enumerate() - .map(|(idx, arg)| { - format!( - "{indentation}{ty} {name} = cast<{ty}>({load}(&{name}_vals[(i+{idx}) % {PASSES}]));\n", - ty = arg.to_c_type(), - name = arg.generate_name(), - load = if arg.is_simd() { - arg.ty.get_load_function(Language::C) - } else { - "*".to_string() - } - ) - }) - .collect() - } - /// Creates a line for each argument that initializes the argument from array `[ARG]_VALS` at /// an offset `i` using a load intrinsic, in Rust. 
/// e.g `let a = vld1_u8(A_VALS.as_ptr().offset(i));` @@ -205,7 +153,7 @@ where "{indentation}let {name} = {load}({vals_name}.as_ptr().add((i+{idx}) % {PASSES}) as _);\n", name = arg.generate_name(), vals_name = arg.rust_vals_array_name(), - load = arg.ty.get_load_function(Language::Rust), + load = arg.ty.get_load_function(), ) } else { format!( diff --git a/crates/intrinsic-test/src/common/cli.rs b/crates/intrinsic-test/src/common/cli.rs index bed8259de8..f407b5ceb7 100644 --- a/crates/intrinsic-test/src/common/cli.rs +++ b/crates/intrinsic-test/src/common/cli.rs @@ -1,12 +1,6 @@ use itertools::Itertools; use std::path::PathBuf; -#[derive(Debug, PartialEq)] -pub enum Language { - Rust, - C, -} - /// Intrinsic test tool #[derive(clap::Parser)] #[command( @@ -17,41 +11,13 @@ pub struct Cli { /// The input file containing the intrinsics pub input: PathBuf, - /// The rust toolchain to use for building the rust code - #[arg(long)] - pub toolchain: Option, - - /// The C++ compiler to use for compiling the c++ code - #[arg(long, default_value_t = String::from("clang++"))] - pub cppcompiler: String, - - /// Run the C programs under emulation with this command - #[arg(long)] - pub runner: Option, - /// Filename for a list of intrinsics to skip (one per line) #[arg(long)] pub skip: Option, - /// Regenerate test programs, but don't build or run them - #[arg(long)] - pub generate_only: bool, - /// Pass a target the test suite - #[arg(long, default_value_t = String::from("armv7-unknown-linux-gnueabihf"))] - pub target: String, - - /// Pass a profile (release, dev) - #[arg(long, default_value_t = String::from("release"))] - pub profile: String, - - /// Set the linker - #[arg(long)] - pub linker: Option, - - /// Set the sysroot for the C++ compiler #[arg(long)] - pub cxx_toolchain_dir: Option, + pub target: String, #[arg(long, default_value_t = 100u8)] pub sample_percentage: u8, @@ -59,13 +25,7 @@ pub struct Cli { pub struct ProcessedCli { pub filename: PathBuf, - pub toolchain: Option, - pub cpp_compiler: Option, - pub runner: String, pub target: String, - pub profile: String, - pub linker: Option, - pub cxx_toolchain_dir: Option, pub skip: Vec, pub sample_percentage: u8, } @@ -73,11 +33,7 @@ pub struct ProcessedCli { impl ProcessedCli { pub fn new(cli_options: Cli) -> Self { let filename = cli_options.input; - let runner = cli_options.runner.unwrap_or_default(); let target = cli_options.target; - let profile = cli_options.profile; - let linker = cli_options.linker; - let cxx_toolchain_dir = cli_options.cxx_toolchain_dir; let sample_percentage = cli_options.sample_percentage; let skip = if let Some(filename) = cli_options.skip { @@ -91,27 +47,8 @@ impl ProcessedCli { Default::default() }; - let (toolchain, cpp_compiler) = if cli_options.generate_only { - (None, None) - } else { - ( - Some( - cli_options - .toolchain - .map_or_else(String::new, |t| format!("+{t}")), - ), - Some(cli_options.cppcompiler), - ) - }; - Self { - toolchain, - cpp_compiler, - runner, target, - profile, - linker, - cxx_toolchain_dir, skip, filename, sample_percentage, diff --git a/crates/intrinsic-test/src/common/compare.rs b/crates/intrinsic-test/src/common/compare.rs deleted file mode 100644 index c1438d1bbf..0000000000 --- a/crates/intrinsic-test/src/common/compare.rs +++ /dev/null @@ -1,144 +0,0 @@ -use itertools::Itertools; -use rayon::prelude::*; -use std::{collections::HashMap, process::Command}; - -pub const INTRINSIC_DELIMITER: &str = "############"; -fn runner_command(runner: &str) -> Command { - let mut it = 
runner.split_whitespace(); - let mut cmd = Command::new(it.next().unwrap()); - cmd.args(it); - - cmd -} - -pub fn compare_outputs( - intrinsic_name_list: &Vec, - runner: &str, - target: &str, - profile: &str, -) -> bool { - let profile_dir = match profile { - "dev" => "debug", - _ => "release", - }; - - let (c, rust) = rayon::join( - || { - runner_command(runner) - .arg("./intrinsic-test-programs") - .current_dir("c_programs") - .output() - }, - || { - runner_command(runner) - .arg(format!( - "./target/{target}/{profile_dir}/intrinsic-test-programs" - )) - .current_dir("rust_programs") - .output() - }, - ); - let (c, rust) = match (c, rust) { - (Ok(c), Ok(rust)) => (c, rust), - failure => panic!("Failed to run: {failure:#?}"), - }; - - if !c.status.success() { - error!( - "Failed to run C program.\nstdout: {stdout}\nstderr: {stderr}", - stdout = std::str::from_utf8(&c.stdout).unwrap_or(""), - stderr = std::str::from_utf8(&c.stderr).unwrap_or(""), - ); - } - - if !rust.status.success() { - error!( - "Failed to run Rust program.\nstdout: {stdout}\nstderr: {stderr}", - stdout = std::str::from_utf8(&rust.stdout).unwrap_or(""), - stderr = std::str::from_utf8(&rust.stderr).unwrap_or(""), - ); - } - - info!("Completed running C++ and Rust test binaries"); - let c = std::str::from_utf8(&c.stdout) - .unwrap() - .to_lowercase() - .replace("-nan", "nan"); - let rust = std::str::from_utf8(&rust.stdout) - .unwrap() - .to_lowercase() - .replace("-nan", "nan"); - - let c_output_map = c - .split(INTRINSIC_DELIMITER) - .filter_map(|output| output.trim().split_once("\n")) - .collect::>(); - let rust_output_map = rust - .split(INTRINSIC_DELIMITER) - .filter_map(|output| output.trim().split_once("\n")) - .collect::>(); - - assert!(!c_output_map.is_empty(), "No C intrinsic output found!"); - - let intrinsics = c_output_map - .keys() - .chain(rust_output_map.keys()) - .unique() - .collect_vec(); - - info!("Comparing outputs"); - let intrinsics_diff_count = intrinsics - .par_iter() - .filter_map(|&&intrinsic| { - let c_output = c_output_map.get(intrinsic).unwrap(); - let rust_output = rust_output_map.get(intrinsic).unwrap(); - if rust_output.eq(c_output) { - None - } else { - let diff = diff::lines(c_output, rust_output); - let diffs = diff - .into_iter() - .filter_map(|diff| match diff { - diff::Result::Left(_) | diff::Result::Right(_) => Some(diff), - diff::Result::Both(_, _) => None, - }) - .collect_vec(); - if diffs.len() > 0 { - Some((intrinsic, diffs)) - } else { - None - } - } - }) - .inspect(|(intrinsic, diffs)| { - use std::io::Write; - - let stdout = std::io::stdout(); - let mut out = stdout.lock(); - - writeln!(out, "Difference for intrinsic: {intrinsic}").unwrap(); - diffs.into_iter().for_each(|diff| match diff { - diff::Result::Left(c) => { - writeln!(out, "C: {c}").unwrap(); - } - diff::Result::Right(rust) => { - writeln!(out, "Rust: {rust}").unwrap(); - } - _ => (), - }); - writeln!( - out, - "****************************************************************" - ) - .unwrap(); - }) - .count(); - - println!( - "{} differences found (tested {} intrinsics)", - intrinsics_diff_count, - intrinsic_name_list.len() - ); - - intrinsics_diff_count == 0 -} diff --git a/crates/intrinsic-test/src/common/compile_c.rs b/crates/intrinsic-test/src/common/compile_c.rs deleted file mode 100644 index fa78b332a7..0000000000 --- a/crates/intrinsic-test/src/common/compile_c.rs +++ /dev/null @@ -1,136 +0,0 @@ -#[derive(Clone)] -pub struct CompilationCommandBuilder { - compiler: String, - target: Option, - 
cxx_toolchain_dir: Option, - arch_flags: Vec, - optimization: String, - project_root: Option, - extra_flags: Vec, -} - -impl CompilationCommandBuilder { - pub fn new() -> Self { - Self { - compiler: String::new(), - target: None, - cxx_toolchain_dir: None, - arch_flags: Vec::new(), - optimization: "2".to_string(), - project_root: None, - extra_flags: Vec::new(), - } - } - - pub fn set_compiler(mut self, compiler: &str) -> Self { - self.compiler = compiler.to_string(); - self - } - - pub fn set_target(mut self, target: &str) -> Self { - self.target = Some(target.to_string()); - self - } - - pub fn set_cxx_toolchain_dir(mut self, path: Option<&str>) -> Self { - self.cxx_toolchain_dir = path.map(|p| p.to_string()); - self - } - - pub fn add_arch_flags<'a>(mut self, flags: impl IntoIterator) -> Self { - self.arch_flags - .extend(flags.into_iter().map(|s| s.to_owned())); - - self - } - - pub fn set_opt_level(mut self, optimization: &str) -> Self { - self.optimization = optimization.to_string(); - self - } - - /// Sets the root path of all the generated test files. - pub fn set_project_root(mut self, path: &str) -> Self { - self.project_root = Some(path.to_string()); - self - } - - pub fn add_extra_flags<'a>(mut self, flags: impl IntoIterator) -> Self { - self.extra_flags - .extend(flags.into_iter().map(|s| s.to_owned())); - - self - } - - pub fn add_extra_flag(self, flag: &str) -> Self { - self.add_extra_flags([flag]) - } -} - -impl CompilationCommandBuilder { - pub fn into_cpp_compilation(self) -> CppCompilation { - let mut cpp_compiler = std::process::Command::new(self.compiler); - - if let Some(project_root) = self.project_root { - cpp_compiler.current_dir(project_root); - } - - let flags = std::env::var("CPPFLAGS").unwrap_or("".into()); - cpp_compiler.args(flags.split_whitespace()); - - cpp_compiler.arg(format!("-march={}", self.arch_flags.join("+"))); - - cpp_compiler.arg(format!("-O{}", self.optimization)); - - cpp_compiler.args(self.extra_flags); - - if let Some(target) = &self.target { - cpp_compiler.arg(format!("--target={target}")); - } - - CppCompilation(cpp_compiler) - } -} - -pub struct CppCompilation(std::process::Command); - -fn clone_command(command: &std::process::Command) -> std::process::Command { - let mut cmd = std::process::Command::new(command.get_program()); - if let Some(current_dir) = command.get_current_dir() { - cmd.current_dir(current_dir); - } - cmd.args(command.get_args()); - - for (key, val) in command.get_envs() { - cmd.env(key, val.unwrap_or_default()); - } - - cmd -} - -impl CppCompilation { - pub fn command_mut(&mut self) -> &mut std::process::Command { - &mut self.0 - } - - pub fn compile_object_file( - &self, - input: &str, - output: &str, - ) -> std::io::Result { - let mut cmd = clone_command(&self.0); - cmd.args([input, "-v", "-c", "-o", output]); - cmd.output() - } - - pub fn link_executable( - &self, - inputs: impl Iterator, - output: &str, - ) -> std::io::Result { - let mut cmd = clone_command(&self.0); - cmd.args(inputs); - cmd.args(["-o", output]); - cmd.output() - } -} diff --git a/crates/intrinsic-test/src/common/gen_c.rs b/crates/intrinsic-test/src/common/gen_c.rs index a95b4c36b7..bdf6f68d58 100644 --- a/crates/intrinsic-test/src/common/gen_c.rs +++ b/crates/intrinsic-test/src/common/gen_c.rs @@ -1,166 +1,42 @@ +use itertools::Itertools; + use crate::common::intrinsic::Intrinsic; -use super::argument::Argument; -use super::compare::INTRINSIC_DELIMITER; -use super::indentation::Indentation; use super::intrinsic_helpers::IntrinsicTypeDefinition; 
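As a concrete illustration of the new pipeline: for a hypothetical immediate-taking NEON intrinsic such as vshlq_n_s32 (one vector argument a plus a const N immediate), the write_wrapper_c function below, combined with the as_non_imm_arglist_c/as_call_params_c helpers added in argument.rs, would emit one C wrapper per immediate specialization, roughly:

/* Sketch of the generated c_programs/wrapper_{i}.c output; the intrinsic and
 * the N = 1 specialization are assumed examples, not taken from this change.
 * The result is written through a pointer so the Rust side can call the
 * wrapper over FFI and compare it against the Rust intrinsic directly. */
void vshlq_n_s32_wrapper_1(int32x4_t* __dst, const int32x4_t* a) {
    *__dst = vshlq_n_s32(*a, 1);
}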
-// The number of times each intrinsic will be called. -const PASSES: u32 = 20; -const COMMON_HEADERS: [&str; 7] = [ - "iostream", - "string", - "cstring", - "iomanip", - "sstream", - "type_traits", - "cassert", -]; - -pub fn generate_c_test_loop( - w: &mut impl std::io::Write, - intrinsic: &Intrinsic, - indentation: Indentation, - additional: &str, - passes: u32, -) -> std::io::Result<()> { - let body_indentation = indentation.nested(); - writeln!( - w, - "{indentation}for (int i=0; i<{passes}; i++) {{\n\ - {loaded_args}\ - {body_indentation}auto __return_value = {intrinsic_call}({args});\n\ - {print_result}\n\ - {indentation}}}", - loaded_args = intrinsic.arguments.load_values_c(body_indentation), - intrinsic_call = intrinsic.name, - args = intrinsic.arguments.as_call_param_c(), - print_result = intrinsic - .results - .print_result_c(body_indentation, additional) - ) -} - -pub fn generate_c_constraint_blocks<'a, T: IntrinsicTypeDefinition + 'a>( - w: &mut impl std::io::Write, - intrinsic: &Intrinsic, - indentation: Indentation, - constraints: &mut (impl Iterator> + Clone), - name: String, -) -> std::io::Result<()> { - let Some(current) = constraints.next() else { - return generate_c_test_loop(w, intrinsic, indentation, &name, PASSES); - }; - - let body_indentation = indentation.nested(); - for i in current.constraint.iter().flat_map(|c| c.iter()) { - let ty = current.ty.c_type(); - - writeln!(w, "{indentation}{{")?; - - // TODO: Move to actually specifying the enum value - // instead of typecasting integers, for better clarity - // of generated code. - writeln!( - w, - "{body_indentation}const {ty} {} = ({ty}){i};", - current.generate_name() - )?; - - generate_c_constraint_blocks( - w, - intrinsic, - body_indentation, - &mut constraints.clone(), - format!("{name}-{i}"), - )?; - - writeln!(w, "{indentation}}}")?; - } - - Ok(()) -} - -// Compiles C test programs using specified compiler -pub fn create_c_test_function( - w: &mut impl std::io::Write, - intrinsic: &Intrinsic, -) -> std::io::Result<()> { - let indentation = Indentation::default(); - - writeln!(w, "int run_{}() {{", intrinsic.name)?; - - // Define the arrays of arguments. - let arguments = &intrinsic.arguments; - arguments.gen_arglists_c(w, indentation.nested(), PASSES)?; - - generate_c_constraint_blocks( - w, - intrinsic, - indentation.nested(), - &mut arguments.iter().rev().filter(|&i| i.has_constraint()), - Default::default(), - )?; - - writeln!(w, " return 0;")?; - writeln!(w, "}}")?; - - Ok(()) -} - -pub fn write_mod_cpp( +pub fn write_wrapper_c( w: &mut impl std::io::Write, notice: &str, platform_headers: &[&str], - forward_declarations: &str, intrinsics: &[Intrinsic], ) -> std::io::Result<()> { write!(w, "{notice}")?; - for header in COMMON_HEADERS.iter().chain(platform_headers.iter()) { - writeln!(w, "#include <{header}>")?; - } - - writeln!(w, "{}", forward_declarations)?; - - for intrinsic in intrinsics { - create_c_test_function(w, intrinsic)?; - } - - Ok(()) -} + writeln!(w, "#include ")?; + writeln!(w, "#include ")?; -pub fn write_main_cpp<'a>( - w: &mut impl std::io::Write, - arch_specific_definitions: &str, - arch_specific_headers: &[&str], - intrinsics: impl Iterator + Clone, -) -> std::io::Result<()> { - for header in COMMON_HEADERS.iter().chain(arch_specific_headers.iter()) { + for header in platform_headers { writeln!(w, "#include <{header}>")?; } - // NOTE: It's assumed that this value contains the required `ifdef`s. 
- writeln!(w, "{arch_specific_definitions }")?; - - for intrinsic in intrinsics.clone() { - writeln!(w, "extern int run_{intrinsic}(void);")?; - } - - writeln!(w, "int main(int argc, char **argv) {{")?; - for intrinsic in intrinsics { - writeln!( - w, - " std::cout << \"{INTRINSIC_DELIMITER}\" << std::endl;" - )?; - writeln!(w, " std::cout << \"{intrinsic}\" << std::endl;")?; - writeln!(w, " run_{intrinsic}();\n")?; + intrinsic.iter_specializations(|imm_values| { + writeln!( + w, + " +void {name}_wrapper{imm_arglist}({return_ty}* __dst{arglist}) {{ + *__dst = {name}({params}); +}}", + return_ty = intrinsic.results.c_type(), + name = intrinsic.name, + imm_arglist = imm_values + .iter() + .format_with("", |i, fmt| fmt(&format_args!("_{i}"))), + arglist = intrinsic.arguments.as_non_imm_arglist_c(), + params = intrinsic.arguments.as_call_params_c(&imm_values) + ) + })?; } - writeln!(w, " return 0;")?; - - writeln!(w, "}}")?; - Ok(()) } diff --git a/crates/intrinsic-test/src/common/gen_rust.rs b/crates/intrinsic-test/src/common/gen_rust.rs index 82b97701bb..d11bcbdce6 100644 --- a/crates/intrinsic-test/src/common/gen_rust.rs +++ b/crates/intrinsic-test/src/common/gen_rust.rs @@ -1,23 +1,53 @@ use itertools::Itertools; -use std::process::Command; -use super::compare::INTRINSIC_DELIMITER; use super::indentation::Indentation; use super::intrinsic_helpers::IntrinsicTypeDefinition; use crate::common::argument::ArgumentList; use crate::common::intrinsic::Intrinsic; +use crate::common::intrinsic_helpers::TypeKind; // The number of times each intrinsic will be called. pub(crate) const PASSES: u32 = 20; +const COMMON_RUST_DEFINITIONS: &str = r#" +macro_rules! make_nice { + ($($wrapper:ident ($inner:ty)),*) => {$( + #[derive(Debug, Copy, Clone)] + #[repr(transparent)] + pub struct $wrapper($inner); + + impl PartialEq for $wrapper { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 || (self.0.is_nan() && other.0.is_nan()) + } + } + + impl Eq for $wrapper {} + )*} +} + +make_nice!(NiceF16(f16), NiceF32(f32), NiceF64(f64)); +"#; + macro_rules! concatln { ($($lines:expr),* $(,)?) 
=> { concat!($( $lines, "\n" ),*) }; } -fn write_cargo_toml_header(w: &mut impl std::io::Write, name: &str) -> std::io::Result<()> { - writeln!( +pub fn write_bin_cargo_toml( + w: &mut impl std::io::Write, + module_count: usize, +) -> std::io::Result<()> { + write!(w, concatln!("[workspace]", "members = ["))?; + for i in 0..module_count { + writeln!(w, " \"mod_{i}\",")?; + } + writeln!(w, "]") +} + +pub fn write_lib_cargo_toml(w: &mut impl std::io::Write, name: &str) -> std::io::Result<()> { + write!( w, concatln!( "[package]", @@ -26,6 +56,12 @@ fn write_cargo_toml_header(w: &mut impl std::io::Write, name: &str) -> std::io:: "authors = [{authors}]", "license = \"{license}\"", "edition = \"2018\"", + "", + "[dependencies]", + "core_arch = {{ path = \"../../crates/core_arch\" }}", + "", + "[build-dependencies]", + "cc = \"1\"" ), name = name, version = env!("CARGO_PKG_VERSION"), @@ -36,72 +72,12 @@ fn write_cargo_toml_header(w: &mut impl std::io::Write, name: &str) -> std::io:: ) } -pub fn write_bin_cargo_toml( - w: &mut impl std::io::Write, - module_count: usize, -) -> std::io::Result<()> { - write_cargo_toml_header(w, "intrinsic-test-programs")?; - - writeln!(w, "[dependencies]")?; - writeln!(w, "core_arch = {{ path = \"../crates/core_arch\" }}")?; - - for i in 0..module_count { - writeln!(w, "mod_{i} = {{ path = \"mod_{i}/\" }}")?; - } - - Ok(()) -} - -pub fn write_lib_cargo_toml(w: &mut impl std::io::Write, name: &str) -> std::io::Result<()> { - write_cargo_toml_header(w, name)?; - - writeln!(w, "[dependencies]")?; - writeln!(w, "core_arch = {{ path = \"../../crates/core_arch\" }}")?; - - Ok(()) -} - -pub fn write_main_rs<'a>( - w: &mut impl std::io::Write, - chunk_count: usize, - cfg: &str, - definitions: &str, - intrinsics: impl Iterator + Clone, -) -> std::io::Result<()> { - writeln!(w, "#![feature(simd_ffi)]")?; - writeln!(w, "#![feature(f16)]")?; - writeln!(w, "#![allow(unused)]")?; - - // Cargo will spam the logs if these warnings are not silenced. 
- writeln!(w, "#![allow(non_upper_case_globals)]")?; - writeln!(w, "#![allow(non_camel_case_types)]")?; - writeln!(w, "#![allow(non_snake_case)]")?; - - writeln!(w, "{cfg}")?; - writeln!(w, "{definitions}")?; - - for module in 0..chunk_count { - writeln!(w, "use mod_{module}::*;")?; - } - - writeln!(w, "fn main() {{")?; - - for binary in intrinsics { - writeln!(w, " println!(\"{INTRINSIC_DELIMITER}\");")?; - writeln!(w, " println!(\"{binary}\");")?; - writeln!(w, " run_{binary}();\n")?; - } - - writeln!(w, "}}")?; - - Ok(()) -} - pub fn write_lib_rs( w: &mut impl std::io::Write, notice: &str, cfg: &str, definitions: &str, + i: usize, intrinsics: &[Intrinsic], ) -> std::io::Result<()> { write!(w, "{notice}")?; @@ -117,13 +93,15 @@ pub fn write_lib_rs( writeln!(w, "{cfg}")?; + writeln!(w, "{}", COMMON_RUST_DEFINITIONS)?; + writeln!(w, "{definitions}")?; let mut seen = std::collections::HashSet::new(); for intrinsic in intrinsics { for arg in &intrinsic.arguments.args { - if !arg.has_constraint() && arg.ty.is_rust_vals_array_const() { + if !arg.has_constraint() { let name = arg.rust_vals_array_name().to_string(); if seen.insert(name) { @@ -133,190 +111,189 @@ pub fn write_lib_rs( } } + write_bindings_rust(w, i, intrinsics)?; + for intrinsic in intrinsics { - crate::common::gen_rust::create_rust_test_module(w, intrinsic)?; + create_rust_test(w, intrinsic)?; } Ok(()) } -pub fn compile_rust_programs( - toolchain: Option<&str>, - target: &str, - profile: &str, - linker: Option<&str>, -) -> bool { - /* If there has been a linker explicitly set from the command line then - * we want to set it via setting it in the RUSTFLAGS*/ - - // This is done because `toolchain` is None when - // the --generate-only flag is passed - if toolchain.is_none() { - return true; - } - - trace!("Building cargo command"); - - let mut cargo_command = Command::new("cargo"); - cargo_command.current_dir("rust_programs"); - - // Do not use the target directory of the workspace please. - cargo_command.env("CARGO_TARGET_DIR", "target"); - - if toolchain.is_some_and(|val| !val.is_empty()) { - cargo_command.arg(toolchain.unwrap()); - } - cargo_command.args(["build", "--target", target, "--profile", profile]); - - let mut rust_flags = "-Cdebuginfo=0".to_string(); - if let Some(linker) = linker { - rust_flags.push_str(" -C linker="); - rust_flags.push_str(linker); - rust_flags.push_str(" -C link-args=-static"); - - cargo_command.env("CPPFLAGS", "-fuse-ld=lld"); - } - - cargo_command.env("RUSTFLAGS", rust_flags); - - trace!("running cargo"); - - if log::log_enabled!(log::Level::Trace) { - cargo_command.stdout(std::process::Stdio::inherit()); - cargo_command.stderr(std::process::Stdio::inherit()); - } - - let output = cargo_command.output(); - trace!("cargo is done"); - - if let Ok(output) = output { - if output.status.success() { - true - } else { - error!( - "Failed to compile code for rust intrinsics\n\nstdout:\n{}\n\nstderr:\n{}", - std::str::from_utf8(&output.stdout).unwrap_or(""), - std::str::from_utf8(&output.stderr).unwrap_or("") - ); - false - } - } else { - error!("Command failed: {output:#?}"); - false - } -} - -pub fn generate_rust_test_loop( +fn generate_rust_test_loop( w: &mut impl std::io::Write, intrinsic: &Intrinsic, - indentation: Indentation, - specializations: &[Vec], passes: u32, ) -> std::io::Result<()> { let intrinsic_name = &intrinsic.name; // Each function (and each specialization) has its own type. Erase that type with a cast. 
- let mut coerce = String::from("unsafe fn("); + let mut coerce = String::from("fn("); + let mut c_coerce = String::from("fn(_, "); for _ in intrinsic.arguments.iter().filter(|a| !a.has_constraint()) { coerce += "_, "; + c_coerce += "_, "; } coerce += ") -> _"; - - match specializations { - [] => { - writeln!(w, " let specializations = [(\"\", {intrinsic_name})];")?; - } - [const_args] if const_args.is_empty() => { - writeln!(w, " let specializations = [(\"\", {intrinsic_name})];")?; - } - _ => { - writeln!(w, " let specializations = [")?; - - for specialization in specializations { - let mut specialization: Vec<_> = - specialization.iter().map(|d| d.to_string()).collect(); - - let const_args = specialization.join(","); - - // The identifier is reversed. - specialization.reverse(); - let id = specialization.join("-"); - - writeln!( - w, - " (\"-{id}\", {intrinsic_name}::<{const_args}> as {coerce})," - )?; - } - - writeln!(w, " ];")?; - } + c_coerce += ")"; + + if intrinsic + .arguments + .iter() + .filter(|arg| arg.has_constraint()) + .count() + == 0 + { + writeln!( + w, + " let specializations = [(\"\", {intrinsic_name}, {intrinsic_name}_wrapper)];" + )?; + } else { + writeln!(w, " let specializations = [")?; + + intrinsic.iter_specializations(|imm_values| { + writeln!( + w, + " (\"{const_args}\", {intrinsic_name}::<{const_args}> as unsafe {coerce}, {intrinsic_name}_wrapper_{c_const_args} as unsafe extern \"C\" {c_coerce}),", + const_args = imm_values.iter().join(","), + c_const_args = imm_values.iter().join("_"), + ) + })?; + + writeln!(w, " ];")?; } + let (cast_prefix, cast_suffix) = if intrinsic.results.is_simd() { + ( + format!( + "std::mem::transmute::<_, [{}; {}]>(", + intrinsic.results.rust_scalar_type().replace("f", "NiceF"), + intrinsic.results.num_lanes() * intrinsic.results.num_vectors() + ), + ")", + ) + } else if intrinsic.results.kind == TypeKind::Float { + ( + match intrinsic.results.inner_size() { + 16 => format!("NiceF16("), + 32 => format!("NiceF32("), + 64 => format!("NiceF64("), + _ => unimplemented!(), + }, + ")", + ) + } else { + ("".to_string(), "") + }; + write!( w, concatln!( - " for (id, f) in specializations {{", + " for (id, rust, c) in specializations {{", " for i in 0..{passes} {{", " unsafe {{", "{loaded_args}", - " let __return_value = f({args});", - " println!(\"Result {{id}}-{{}}: {{:?}}\", i + 1, {return_value});", + " let __rust_return_value = rust({rust_args});", + "", + " let mut __c_return_value = std::mem::MaybeUninit::uninit();", + " c(__c_return_value.as_mut_ptr(){c_args});", + " let __c_return_value = __c_return_value.assume_init();", + "", + " assert_eq!({cast_prefix}__rust_return_value{cast_suffix}, {cast_prefix}__c_return_value{cast_suffix}, \"{{id}}\");", " }}", " }}", " }}", ), - loaded_args = intrinsic.arguments.load_values_rust(indentation.nest_by(4)), - args = intrinsic.arguments.as_call_param_rust(), - return_value = intrinsic.results.print_result_rust(), + loaded_args = intrinsic + .arguments + .load_values_rust(Indentation::default().nest_by(4)), + rust_args = intrinsic.arguments.as_call_param_rust(), + c_args = intrinsic.arguments.as_c_call_param_rust(), passes = passes, + cast_prefix = cast_prefix, + cast_suffix = cast_suffix, ) } -/// Generate the specializations (unique sequences of const-generic arguments) for this intrinsic. 
-fn generate_rust_specializations( - constraints: &mut impl Iterator>, -) -> Vec> { - let mut specializations = vec![vec![]]; - - for constraint in constraints { - specializations = constraint - .flat_map(|right| { - specializations.iter().map(move |left| { - let mut left = left.clone(); - left.push(i32::try_from(right).unwrap()); - left - }) - }) - .collect(); - } +fn create_rust_test( + w: &mut impl std::io::Write, + intrinsic: &Intrinsic, +) -> std::io::Result<()> { + trace!("generating `{}`", intrinsic.name); + + write!( + w, + concatln!("#[test]", "fn test_{intrinsic_name}() {{"), + intrinsic_name = intrinsic.name, + )?; + + generate_rust_test_loop(w, intrinsic, PASSES)?; + + writeln!(w, "}}")?; - specializations + Ok(()) } -// Top-level function to create complete test program -pub fn create_rust_test_module( +pub fn write_bindings_rust( w: &mut impl std::io::Write, - intrinsic: &Intrinsic, + i: usize, + intrinsics: &[Intrinsic], ) -> std::io::Result<()> { - trace!("generating `{}`", intrinsic.name); - let indentation = Indentation::default(); + writeln!( + w, + concatln!( + "#[allow(improper_ctypes)]", + "#[link(name = \"wrapper_{i}\")]", + "unsafe extern \"C\" {{" + ), + i = i + )?; - writeln!(w, "pub fn run_{}() {{", intrinsic.name)?; + for intrinsic in intrinsics { + intrinsic.iter_specializations(|imm_values| { + writeln!( + w, + " fn {name}_wrapper{imm_arglist}(__dst: *mut {return_ty}{arglist});", + return_ty = intrinsic.results.rust_type(), + name = intrinsic.name, + imm_arglist = imm_values + .iter() + .format_with("", |i, fmt| fmt(&format_args!("_{i}"))), + arglist = intrinsic.arguments.as_non_imm_arglist_rust(), + ) + })?; + } - // Define the arrays of arguments. - let arguments = &intrinsic.arguments; - arguments.gen_arglists_rust(w, indentation.nested(), PASSES)?; + writeln!(w, "}}") +} - // Define any const generics as `const` items, then generate the actual test loop. - let specializations = generate_rust_specializations( - &mut arguments - .iter() - .filter_map(|i| i.constraint.as_ref().map(|v| v.iter())), - ); +pub fn write_build_rs( + w: &mut impl std::io::Write, + i: usize, + arch_flags: &[&str], +) -> std::io::Result<()> { + const COMMON_FLAGS: &[&str] = &["-ffp-contract=off", "-ffp-model=strict", "-Wno-narrowing"]; - generate_rust_test_loop(w, intrinsic, indentation, &specializations, PASSES)?; + write!( + w, + concatln!( + "fn main() {{", + " cc::Build::new()", + " .file(\"../../c_programs/wrapper_{i}.c\")", + " .opt_level(2)", + " .flags(&[", + ), + i = i + )?; - writeln!(w, "}}")?; + let indentation = Indentation::default().nest_by(2); + for flag in COMMON_FLAGS.iter().chain(arch_flags) { + writeln!(w, "{indentation}\"{flag}\",")?; + } - Ok(()) + write!( + w, + concatln!(" ])", " .compile(\"wrapper_{i}\");", "}}"), + i = i + ) } diff --git a/crates/intrinsic-test/src/common/intrinsic.rs b/crates/intrinsic-test/src/common/intrinsic.rs index 81f6d6d8b5..76e5959153 100644 --- a/crates/intrinsic-test/src/common/intrinsic.rs +++ b/crates/intrinsic-test/src/common/intrinsic.rs @@ -1,3 +1,5 @@ +use crate::common::constraint::Constraint; + use super::argument::ArgumentList; use super::intrinsic_helpers::IntrinsicTypeDefinition; @@ -16,3 +18,36 @@ pub struct Intrinsic { /// Any architecture-specific tags. 
     pub arch_tags: Vec<String>,
 }
+
+fn recurse_specializations<'a, E>(
+    constraints: &mut (impl Iterator<Item = &'a Constraint> + Clone),
+    imm_values: &mut Vec<i64>,
+    f: &mut impl FnMut(&[i64]) -> Result<(), E>,
+) -> Result<(), E> {
+    if let Some(current) = constraints.next() {
+        for i in current.iter() {
+            imm_values.push(i);
+            recurse_specializations(&mut constraints.clone(), imm_values, f)?;
+            imm_values.pop();
+        }
+        Ok(())
+    } else {
+        f(&imm_values)
+    }
+}
+
+impl<T: IntrinsicTypeDefinition> Intrinsic<T> {
+    pub fn iter_specializations<E>(
+        &self,
+        mut f: impl FnMut(&[i64]) -> Result<(), E>,
+    ) -> Result<(), E> {
+        recurse_specializations(
+            &mut self
+                .arguments
+                .iter()
+                .filter_map(|arg| arg.constraint.as_ref()),
+            &mut Vec::new(),
+            &mut f,
+        )
+    }
+}
diff --git a/crates/intrinsic-test/src/common/intrinsic_helpers.rs b/crates/intrinsic-test/src/common/intrinsic_helpers.rs
index a14d7ef05f..06512801ce 100644
--- a/crates/intrinsic-test/src/common/intrinsic_helpers.rs
+++ b/crates/intrinsic-test/src/common/intrinsic_helpers.rs
@@ -5,7 +5,6 @@ use std::str::FromStr;
 
 use itertools::Itertools as _;
 
-use super::cli::Language;
 use super::indentation::Indentation;
 use super::values::value_for_array;
 
@@ -94,6 +93,7 @@ impl TypeKind {
             Self::Poly => "u",
             Self::Char(Sign::Unsigned) => "u",
             Self::Char(Sign::Signed) => "i",
+            Self::Mask => "u",
             _ => unreachable!("Unused type kind: {self:#?}"),
         }
     }
@@ -154,67 +154,7 @@ impl IntrinsicType {
         self.ptr
     }
 
-    pub fn c_scalar_type(&self) -> String {
-        match self.kind() {
-            TypeKind::Char(_) => String::from("char"),
-            TypeKind::Vector => String::from("int32_t"),
-            _ => format!(
-                "{prefix}{bits}_t",
-                prefix = self.kind().c_prefix(),
-                bits = self.inner_size()
-            ),
-        }
-    }
-
-    pub fn c_promotion(&self) -> &str {
-        match *self {
-            IntrinsicType {
-                kind,
-                bit_len: Some(8),
-                ..
-            } => match kind {
-                TypeKind::Int(Sign::Signed) => "int",
-                TypeKind::Int(Sign::Unsigned) => "unsigned int",
-                TypeKind::Poly => "uint8_t",
-                _ => "",
-            },
-            IntrinsicType {
-                kind: TypeKind::Poly,
-                bit_len: Some(bit_len),
-                ..
-            } => match bit_len {
-                8 => unreachable!("handled above"),
-                16 => "uint16_t",
-                32 => "uint32_t",
-                64 => "uint64_t",
-                128 => "",
-                _ => panic!("invalid bit_len"),
-            },
-            IntrinsicType {
-                kind: TypeKind::Float,
-                bit_len: Some(bit_len),
-                ..
-            } => match bit_len {
-                16 => "float16_t",
-                32 => "float",
-                64 => "double",
-                128 => "",
-                _ => panic!("invalid bit_len"),
-            },
-            IntrinsicType {
-                kind: TypeKind::Char(_),
-                ..
-            } => "char",
-            _ => "",
-        }
-    }
-
-    pub fn populate_random(
-        &self,
-        indentation: Indentation,
-        loads: u32,
-        language: &Language,
-    ) -> String {
+    pub fn populate_random(&self, indentation: Indentation, loads: u32) -> String {
         match self {
             IntrinsicType {
                 bit_len: Some(bit_len @ (1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 16 | 32 | 64)),
@@ -224,13 +164,9 @@ impl IntrinsicType {
                 vec_len,
                 ..
             } => {
-                let (prefix, suffix) = match language {
-                    Language::Rust => ('[', ']'),
-                    Language::C => ('{', '}'),
-                };
                 let body_indentation = indentation.nested();
                 format!(
-                    "{prefix}\n{body}\n{indentation}{suffix}",
+                    "[\n{body}\n{indentation}]",
                     body = (0..(simd_len.unwrap_or(1) * vec_len.unwrap_or(1) + loads - 1))
                         .format_with(",\n", |i, fmt| {
                             let src = value_for_array(*bit_len, i);
@@ -241,13 +177,7 @@ impl IntrinsicType {
                                 let mask = !0u64 >> (64 - *bit_len);
                                 let ones_compl = src ^ mask;
                                 let twos_compl = ones_compl + 1;
-                                if (twos_compl == src) && (language == &Language::C) {
-                                    // `src` is INT*_MIN. C requires `-0x7fffffff - 1` to avoid
-                                    // undefined literal overflow behaviour.
- fmt(&format_args!("{body_indentation}-{ones_compl:#x} - 1")) - } else { - fmt(&format_args!("{body_indentation}-{twos_compl:#x}")) - } + fmt(&format_args!("{body_indentation}-{twos_compl:#x}")) } else { fmt(&format_args!("{body_indentation}{src:#x}")) } @@ -261,20 +191,11 @@ impl IntrinsicType { vec_len, .. } => { - let (prefix, cast_prefix, cast_suffix, suffix) = match (language, bit_len) { - (&Language::Rust, 16) => ('[', "f16::from_bits(", ")", ']'), - (&Language::Rust, 32) => ('[', "f32::from_bits(", ")", ']'), - (&Language::Rust, 64) => ('[', "f64::from_bits(", ")", ']'), - (&Language::C, 16) => ('{', "cast(", ")", '}'), - (&Language::C, 32) => ('{', "cast(", ")", '}'), - (&Language::C, 64) => ('{', "cast(", ")", '}'), - _ => unreachable!(), - }; format!( - "{prefix}\n{body}\n{indentation}{suffix}", + "[\n{body}\n{indentation}]", body = (0..(simd_len.unwrap_or(1) * vec_len.unwrap_or(1) + loads - 1)) .format_with(",\n", |i, fmt| fmt(&format_args!( - "{indentation}{cast_prefix}{src:#x}{cast_suffix}", + "{indentation}f{bit_len}::from_bits({src:#x})", indentation = indentation.nested(), src = value_for_array(*bit_len, i) ))) @@ -287,14 +208,10 @@ impl IntrinsicType { vec_len, .. } => { - let (prefix, suffix) = match language { - Language::Rust => ('[', ']'), - Language::C => ('{', '}'), - }; let body_indentation = indentation.nested(); let effective_bit_len = 32; format!( - "{prefix}\n{body}\n{indentation}{suffix}", + "[\n{body}\n{indentation}]", body = (0..(vec_len.unwrap_or(1) * simd_len.unwrap_or(1) + loads - 1)) .format_with(",\n", |i, fmt| { let src = value_for_array(effective_bit_len, i); @@ -304,13 +221,7 @@ impl IntrinsicType { let mask = !0u64 >> (64 - effective_bit_len); let ones_compl = src ^ mask; let twos_compl = ones_compl + 1; - if (twos_compl == src) && (language == &Language::C) { - // `src` is INT*_MIN. C requires `-0x7fffffff - 1` to avoid - // undefined literal overflow behaviour. - fmt(&format_args!("{body_indentation}-{ones_compl:#x} - 1")) - } else { - fmt(&format_args!("{body_indentation}-{twos_compl:#x}")) - } + fmt(&format_args!("{body_indentation}-{twos_compl:#x}")) } else { fmt(&format_args!("{body_indentation}{src:#x}")) } @@ -320,57 +231,20 @@ impl IntrinsicType { _ => unimplemented!("populate random: {self:#?}"), } } - - pub fn is_rust_vals_array_const(&self) -> bool { - match self { - // Floats have to be loaded at runtime for stable NaN conversion. - IntrinsicType { - kind: TypeKind::Float, - .. - } => false, - IntrinsicType { - kind: TypeKind::Int(_) | TypeKind::Poly, - .. - } => true, - _ => true, - } - } - - pub fn as_call_param_c(&self, name: &String) -> String { - if self.ptr { - format!("&{name}") - } else { - name.clone() - } - } } pub trait IntrinsicTypeDefinition: Deref { /// Determines the load function for this type. /// can be implemented in an `impl` block - fn get_load_function(&self, _language: Language) -> String; - - /// can be implemented in an `impl` block - fn get_lane_function(&self) -> String; + fn get_load_function(&self) -> String; /// Gets a string containing the typename for this type in C format. /// can be directly defined in `impl` blocks fn c_type(&self) -> String; + /// Gets a string containing the typename for this type in Rust format. /// can be directly defined in `impl` blocks - fn c_single_vector_type(&self) -> String; - - /// Generates a std::cout for the intrinsics results that will match the - /// rust debug output format for the return type. 
The generated line assumes - /// there is an int i in scope which is the current pass number. - fn print_result_c(&self, indentation: Indentation, additional: &str) -> String; - - /// Generates a std::cout for the intrinsics results that will match the - /// rust debug output format for the return type. The generated line assumes - /// there is an int i in scope which is the current pass number. - fn print_result_rust(&self) -> String { - String::from("format_args!(\"{__return_value:.150?}\")") - } + fn rust_type(&self) -> String; /// To enable architecture-specific logic fn rust_scalar_type(&self) -> String { @@ -380,13 +254,4 @@ pub trait IntrinsicTypeDefinition: Deref<Target = IntrinsicType> { bits = self.inner_size() ) } - - fn generate_final_type_cast(&self) -> String { - let type_data = self.c_promotion(); - if type_data.len() > 2 { - format!("({type_data})") - } else { - String::new() - } - } } diff --git a/crates/intrinsic-test/src/common/mod.rs b/crates/intrinsic-test/src/common/mod.rs index a1062b3a87..86849f7db3 100644 --- a/crates/intrinsic-test/src/common/mod.rs +++ b/crates/intrinsic-test/src/common/mod.rs @@ -1,38 +1,32 @@ -use std::fs::File; +use std::{fs::File, io}; use rayon::prelude::*; use cli::ProcessedCli; use crate::common::{ - compile_c::CppCompilation, - gen_c::{write_main_cpp, write_mod_cpp}, - gen_rust::{ - compile_rust_programs, write_bin_cargo_toml, write_lib_cargo_toml, write_lib_rs, - write_main_rs, - }, + gen_c::write_wrapper_c, + gen_rust::{write_bin_cargo_toml, write_build_rs, write_lib_cargo_toml, write_lib_rs}, intrinsic::Intrinsic, intrinsic_helpers::IntrinsicTypeDefinition, }; pub mod argument; pub mod cli; -pub mod compare; -pub mod compile_c; pub mod constraint; -pub mod gen_c; -pub mod gen_rust; -pub mod indentation; pub mod intrinsic; pub mod intrinsic_helpers; -pub mod values; + +mod gen_c; +mod gen_rust; +mod indentation; +mod values; /// Architectures must support this trait /// to be successfully tested. pub trait SupportedArchitectureTest { type IntrinsicImpl: IntrinsicTypeDefinition + Sync; - fn cli_options(&self) -> &ProcessedCli; fn intrinsics(&self) -> &[Intrinsic<Self::IntrinsicImpl>]; fn create(cli_options: ProcessedCli) -> Self; @@ -40,118 +34,40 @@ pub trait SupportedArchitectureTest { const NOTICE: &str; const PLATFORM_C_HEADERS: &[&str]; - const PLATFORM_C_DEFINITIONS: &str; - const PLATFORM_C_FORWARD_DECLARATIONS: &str; const PLATFORM_RUST_CFGS: &str; const PLATFORM_RUST_DEFINITIONS: &str; - fn cpp_compilation(&self) -> Option<CppCompilation>; - - fn build_c_file(&self) -> bool { - let (chunk_size, chunk_count) = manual_chunk(self.intrinsics().len(), 400); + fn arch_flags(&self) -> Vec<&str>; - let cpp_compiler_wrapped = self.cpp_compilation(); + fn generate_c_file(&self) { + let (chunk_size, _chunk_count) = manual_chunk(self.intrinsics().len()); std::fs::create_dir_all("c_programs").unwrap(); self.intrinsics() .par_chunks(chunk_size) .enumerate() .map(|(i, chunk)| { - let c_filename = format!("c_programs/mod_{i}.cpp"); + let c_filename = format!("c_programs/wrapper_{i}.c"); let mut file = File::create(&c_filename).unwrap(); - let mod_file_write_result = write_mod_cpp( - &mut file, - Self::NOTICE, - Self::PLATFORM_C_HEADERS, - Self::PLATFORM_C_FORWARD_DECLARATIONS, - chunk, - ); - - if let Err(error) = mod_file_write_result { - return Err(format!("Error writing to mod_{i}.cpp: {error:?}")); - } - - // compile this cpp file into a .o file.
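Editor's note on the `populate_random` change earlier in this patch: the deleted INT*_MIN branch existed only for C, where the most negative value cannot be written as a plain negated hex literal (hence the `-0x7fffffff - 1` workaround); Rust accepts the minimum value directly, so only the generic two's-complement path survives. A quick check of that arithmetic (example values mine):

    // Printing "-{twos_compl:#x}" reproduces the original bit pattern
    // once the literal is re-read as a signed integer of width bit_len.
    fn negative_hex(src: u64, bit_len: u32) -> String {
        let mask = !0u64 >> (64 - bit_len); // low bit_len bits set
        let ones_compl = src ^ mask;
        let twos_compl = ones_compl + 1; // magnitude of the negative value
        format!("-{twos_compl:#x}")
    }

    fn main() {
        assert_eq!(negative_hex(0xfe, 8), "-0x2"); // 0xfe as i8 is -2
        assert_eq!(negative_hex(0x80, 8), "-0x80"); // i8::MIN is a valid Rust literal
    }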
- // - // This is done because `cpp_compiler_wrapped` is None when - // the --generate-only flag is passed - trace!("compiling mod_{i}.cpp"); - if let Some(cpp_compiler) = cpp_compiler_wrapped.as_ref() { - let compile_output = cpp_compiler - .compile_object_file(&format!("mod_{i}.cpp"), &format!("mod_{i}.o")) - .map_err(|e| format!("Error compiling mod_{i}.cpp: {e:?}"))?; - - assert!( - compile_output.status.success(), - "{}", - String::from_utf8_lossy(&compile_output.stderr) - ); - - trace!("finished compiling mod_{i}.cpp"); - } - Ok(()) + write_wrapper_c(&mut file, Self::NOTICE, Self::PLATFORM_C_HEADERS, chunk) }) - .collect::<Result<(), String>>() + .collect::<io::Result<()>>() .unwrap(); - - let mut file = File::create("c_programs/main.cpp").unwrap(); - write_main_cpp( - &mut file, - Self::PLATFORM_C_DEFINITIONS, - Self::PLATFORM_C_HEADERS, - self.intrinsics().iter().map(|i| i.name.as_str()), - ) - .unwrap(); - - // This is done because `cpp_compiler_wrapped` is None when - // the --generate-only flag is passed - if let Some(cpp_compiler) = cpp_compiler_wrapped.as_ref() { - // compile this cpp file into a .o file - trace!("compiling main.cpp"); - let output = cpp_compiler - .compile_object_file("main.cpp", "intrinsic-test-programs.o") - .unwrap(); - assert!(output.status.success(), "{output:?}"); - - let object_files = (0..chunk_count) - .map(|i| format!("mod_{i}.o")) - .chain(["intrinsic-test-programs.o".to_owned()]); - - let output = cpp_compiler - .link_executable(object_files, "intrinsic-test-programs") - .unwrap(); - assert!(output.status.success(), "{output:?}"); - } - - true } - fn build_rust_file(&self) -> bool { - std::fs::create_dir_all("rust_programs/src").unwrap(); + fn generate_rust_file(&self) { + let arch_flags = self.arch_flags(); - let (chunk_size, chunk_count) = manual_chunk(self.intrinsics().len(), 400); + std::fs::create_dir_all("rust_programs").unwrap(); + + let (chunk_size, chunk_count) = manual_chunk(self.intrinsics().len()); let mut cargo = File::create("rust_programs/Cargo.toml").unwrap(); write_bin_cargo_toml(&mut cargo, chunk_count).unwrap(); - let mut main_rs = File::create("rust_programs/src/main.rs").unwrap(); - write_main_rs( - &mut main_rs, - chunk_count, - Self::PLATFORM_RUST_CFGS, - "", - self.intrinsics().iter().map(|i| i.name.as_str()), - ) - .unwrap(); - - let target = &self.cli_options().target; - let profile = &self.cli_options().profile; - let toolchain = self.cli_options().toolchain.as_deref(); - let linker = self.cli_options().linker.as_deref(); - self.intrinsics() - .par_chunks(chunk_size) + .chunks(chunk_size) .enumerate() .map(|(i, chunk)| { std::fs::create_dir_all(format!("rust_programs/mod_{i}/src"))?; @@ -165,6 +81,7 @@ pub trait SupportedArchitectureTest { Self::NOTICE, Self::PLATFORM_RUST_CFGS, Self::PLATFORM_RUST_DEFINITIONS, + i, chunk, )?; @@ -174,41 +91,20 @@ pub trait SupportedArchitectureTest { write_lib_cargo_toml(&mut file, &format!("mod_{i}"))?; + let build_rs_filename = format!("rust_programs/mod_{i}/build.rs"); + trace!("generating `{build_rs_filename}`"); + let mut file = File::create(build_rs_filename).unwrap(); + + write_build_rs(&mut file, i, &arch_flags).unwrap(); + Ok(()) }) .collect::<std::io::Result<()>>() .unwrap(); - - compile_rust_programs(toolchain, target, profile, linker) - } - - fn compare_outputs(&self) -> bool { - if self.cli_options().toolchain.is_some() { - let intrinsics_name_list = self - .intrinsics() - .iter() - .map(|i| i.name.clone()) - .collect::<Vec<_>>(); - - compare::compare_outputs( - &intrinsics_name_list, - &self.cli_options().runner, -
&self.cli_options().target, - &self.cli_options().profile, - ) - } else { - true - } } } -// pub fn chunk_info(intrinsic_count: usize) -> (usize, usize) { -// let available_parallelism = std::thread::available_parallelism().unwrap().get(); -// let chunk_size = intrinsic_count.div_ceil(Ord::min(available_parallelism, intrinsic_count)); -// (chunk_size, intrinsic_count.div_ceil(chunk_size)) -// } - -pub fn manual_chunk(intrinsic_count: usize, chunk_size: usize) -> (usize, usize) { - (chunk_size, intrinsic_count.div_ceil(chunk_size)) +pub fn manual_chunk(intrinsic_count: usize) -> (usize, usize) { + let ncores = std::thread::available_parallelism().unwrap().into(); + (intrinsic_count.div_ceil(ncores), ncores) } diff --git a/crates/intrinsic-test/src/main.rs b/crates/intrinsic-test/src/main.rs index e5c846877c..9f57c99f12 100644 --- a/crates/intrinsic-test/src/main.rs +++ b/crates/intrinsic-test/src/main.rs @@ -15,27 +15,21 @@ fn main() { let args: Cli = clap::Parser::parse(); let processed_cli_options = ProcessedCli::new(args); - match processed_cli_options.target.as_str() { - "aarch64-unknown-linux-gnu" - | "armv7-unknown-linux-gnueabihf" - | "aarch64_be-unknown-linux-gnu" => run(ArmArchitectureTest::create(processed_cli_options)), - - "x86_64-unknown-linux-gnu" => run(X86ArchitectureTest::create(processed_cli_options)), - _ => std::process::exit(0), + if processed_cli_options.target.starts_with("arm") + || processed_cli_options.target.starts_with("aarch64") + { + run(ArmArchitectureTest::create(processed_cli_options)) + } else if processed_cli_options.target.starts_with("x86") { + run(X86ArchitectureTest::create(processed_cli_options)) + } else { + unimplemented!("Unsupported target {}", processed_cli_options.target) } } fn run(test_environment: impl SupportedArchitectureTest) { info!("building C binaries"); - if !test_environment.build_c_file() { - std::process::exit(2); - } + test_environment.generate_c_file(); + info!("building Rust binaries"); - if !test_environment.build_rust_file() { - std::process::exit(3); - } - info!("Running binaries"); - if !test_environment.compare_outputs() { - std::process::exit(1); - } + test_environment.generate_rust_file(); } diff --git a/crates/intrinsic-test/src/x86/compile.rs b/crates/intrinsic-test/src/x86/compile.rs deleted file mode 100644 index 65cd291b1b..0000000000 --- a/crates/intrinsic-test/src/x86/compile.rs +++ /dev/null @@ -1,59 +0,0 @@ -use crate::common::cli::ProcessedCli; -use crate::common::compile_c::{CompilationCommandBuilder, CppCompilation}; - -pub fn build_cpp_compilation(config: &ProcessedCli) -> Option<CppCompilation> { - let cpp_compiler = config.cpp_compiler.as_ref()?; - - // -ffp-contract=off emulates Rust's approach of not fusing separate mul-add operations - let mut command = CompilationCommandBuilder::new() - .add_arch_flags(["icelake-client"]) - .set_compiler(cpp_compiler) - .set_target(&config.target) - .set_opt_level("2") - .set_cxx_toolchain_dir(config.cxx_toolchain_dir.as_deref()) - .set_project_root("c_programs") - .add_extra_flags(vec![ - "-ffp-contract=off", - "-Wno-narrowing", - "-mavx", - "-mavx2", - "-mavx512f", - "-msse2", - "-mavx512vl", - "-mavx512bw", - "-mavx512dq", - "-mavx512cd", - "-mavx512fp16", - "-msha512", - "-msm3", - "-msm4", - "-mavxvnni", - "-mavxvnniint8", - "-mavxneconvert", - "-mavxifma", - "-mavxvnniint16", - "-mavx512bf16", - "-mavx512bitalg", - "-mavx512ifma", - "-mavx512vbmi", - "-mavx512vbmi2", - "-mavx512vnni", - "-mavx512vpopcntdq", - "-mavx512vp2intersect", - "-mbmi", - "-mbmi2", - "-mgfni", - "-mvaes",
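Editor's note on the new `manual_chunk` above: chunk size is now derived from the host's core count instead of the previous fixed 400. Worked through with 16 cores assumed for illustration:

    fn main() {
        // 6_000 intrinsics on 16 cores: chunks of ceil(6000 / 16) = 375,
        // so mod_0 .. mod_15 each receive exactly 375 intrinsics.
        assert_eq!(6_000usize.div_ceil(16), 375);
        // 6_001 rounds up to 376 per chunk; the 16th chunk runs short.
        assert_eq!(6_001usize.div_ceil(16), 376);
    }

One caveat worth flagging: when intrinsic_count is smaller than the core count, chunk_size bottoms out at 1 and par_chunks yields fewer chunks than the ncores the function reports, so callers that trust the returned chunk_count (e.g. write_bin_cargo_toml) may reference modules that are never generated.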
- "-mvpclmulqdq", - "-ferror-limit=1000", - "-std=c++23", - ]); - - if !cpp_compiler.contains("clang") { - command = command.add_extra_flag("-flax-vector-conversions"); - } - - let cpp_compiler = command.into_cpp_compilation(); - - Some(cpp_compiler) -} diff --git a/crates/intrinsic-test/src/x86/config.rs b/crates/intrinsic-test/src/x86/config.rs index 491dbb5147..68737ab5ac 100644 --- a/crates/intrinsic-test/src/x86/config.rs +++ b/crates/intrinsic-test/src/x86/config.rs @@ -3,7 +3,6 @@ pub const NOTICE: &str = "\ // test are derived from an XML specification, published under the same license as the // `intrinsic-test` crate.\n"; -// Format f16 values (and vectors containing them) in a way that is consistent with C. pub const PLATFORM_RUST_DEFINITIONS: &str = r#" use core_arch::arch::x86_64::*; @@ -129,206 +128,11 @@ unsafe fn _mm512_loadu_epi64_to___m512(mem_addr: *const i64) -> __m512 { _mm512_castsi512_ps(_mm512_loadu_epi64(mem_addr)) } -#[inline] -fn debug_simd_finish( - formatter: &mut core::fmt::Formatter<'_>, - type_name: &str, - array: &[T; N], -) -> core::fmt::Result { - core::fmt::Formatter::debug_tuple_fields_finish( - formatter, - type_name, - &core::array::from_fn::<&dyn core::fmt::Debug, N, _>(|i| &array[i]), - ) -} - -trait DebugAs { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result; -} - -impl DebugAs for T { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "{self}") - } -} - -macro_rules! impl_debug_as { - ($simd:ty, $name:expr, $bits:expr, [$($type:ty),+]) => { - $( - impl DebugAs<$type> for $simd { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - const ELEMENT_BITS: usize = core::mem::size_of::<$type>() * 8; - const NUM_ELEMENTS: usize = $bits / ELEMENT_BITS; - let array = unsafe { core::mem::transmute::<_, [$type; NUM_ELEMENTS]>(*self) }; - debug_simd_finish(f, $name, &array) - } - } - )+ - }; -} - -impl_debug_as!(__m128i, "__m128i", 128, [u8, i8, u16, i16, u32, i32, u64, i64, f16]); -impl_debug_as!(__m256i, "__m256i", 256, [u8, i8, u16, i16, u32, i32, u64, i64]); -impl_debug_as!(__m512i, "__m512i", 512, [u8, i8, u16, i16, u32, i32, u64, i64]); -impl_debug_as!(__m128h, "__m128h", 128, [f32]); -impl_debug_as!(__m256h, "__m256h", 256, [f32]); -impl_debug_as!(__m512h, "__m512h", 512, [f32]); - -fn debug_as(x: V) -> impl core::fmt::Debug -where V: DebugAs -{ - struct DebugWrapper(V, core::marker::PhantomData); - impl, T> core::fmt::Debug for DebugWrapper { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - self.0.fmt(f) - } - } - DebugWrapper(x, core::marker::PhantomData) -} - -"#; - -pub const PLATFORM_C_FORWARD_DECLARATIONS: &str = r#" -#ifndef X86_DECLARATIONS -#define X86_DECLARATIONS - typedef _Float16 float16_t; - typedef float float32_t; - typedef double float64_t; - - #define __int64 long long - #define __int32 int - - std::ostream& operator<<(std::ostream& os, _Float16 value); - std::ostream& operator<<(std::ostream& os, __m128i value); - std::ostream& operator<<(std::ostream& os, __m256i value); - std::ostream& operator<<(std::ostream& os, __m512i value); - std::ostream& operator<<(std::ostream& os, __mmask8 value); - - #define _mm512_extract_intrinsic_test_epi8(m, lane) \ - _mm_extract_epi8(_mm512_extracti64x2_epi64((m), (lane) / 16), (lane) % 16) - - #define _mm512_extract_intrinsic_test_epi16(m, lane) \ - _mm_extract_epi16(_mm512_extracti64x2_epi64((m), (lane) / 8), (lane) % 8) - - #define _mm512_extract_intrinsic_test_epi32(m, lane) \ - 
_mm_extract_epi32(_mm512_extracti64x2_epi64((m), (lane) / 4), (lane) % 4) - - #define _mm512_extract_intrinsic_test_epi64(m, lane) \ - _mm_extract_epi64(_mm512_extracti64x2_epi64((m), (lane) / 2), (lane) % 2) - - // Load f16 (__m128h) and cast to integer (__m128i) - #define _mm_loadu_ph_to___m128i(mem_addr) _mm_castph_si128(_mm_loadu_ph(mem_addr)) - #define _mm256_loadu_ph_to___m256i(mem_addr) _mm256_castph_si256(_mm256_loadu_ph(mem_addr)) - #define _mm512_loadu_ph_to___m512i(mem_addr) _mm512_castph_si512(_mm512_loadu_ph(mem_addr)) - - // Load f32 (__m128) and cast to f16 (__m128h) - #define _mm_loadu_ps_to___m128h(mem_addr) _mm_castps_ph(_mm_loadu_ps(mem_addr)) - #define _mm256_loadu_ps_to___m256h(mem_addr) _mm256_castps_ph(_mm256_loadu_ps(mem_addr)) - #define _mm512_loadu_ps_to___m512h(mem_addr) _mm512_castps_ph(_mm512_loadu_ps(mem_addr)) - - // Load integer types and cast to double (__m128d, __m256d, __m512d) - #define _mm_loadu_epi16_to___m128d(mem_addr) _mm_castsi128_pd(_mm_loadu_si128((__m128i const*)(mem_addr))) - #define _mm256_loadu_epi16_to___m256d(mem_addr) _mm256_castsi256_pd(_mm256_loadu_si256((__m256i const*)(mem_addr))) - #define _mm512_loadu_epi16_to___m512d(mem_addr) _mm512_castsi512_pd(_mm512_loadu_si512((__m512i const*)(mem_addr))) - - #define _mm_loadu_epi32_to___m128d(mem_addr) _mm_castsi128_pd(_mm_loadu_si128((__m128i const*)(mem_addr))) - #define _mm256_loadu_epi32_to___m256d(mem_addr) _mm256_castsi256_pd(_mm256_loadu_si256((__m256i const*)(mem_addr))) - #define _mm512_loadu_epi32_to___m512d(mem_addr) _mm512_castsi512_pd(_mm512_loadu_si512((__m512i const*)(mem_addr))) - - #define _mm_loadu_epi64_to___m128d(mem_addr) _mm_castsi128_pd(_mm_loadu_si128((__m128i const*)(mem_addr))) - #define _mm256_loadu_epi64_to___m256d(mem_addr) _mm256_castsi256_pd(_mm256_loadu_si256((__m256i const*)(mem_addr))) - #define _mm512_loadu_epi64_to___m512d(mem_addr) _mm512_castsi512_pd(_mm512_loadu_si512((__m512i const*)(mem_addr))) - - // Load integer types and cast to float (__m128, __m256, __m512) - #define _mm_loadu_epi16_to___m128(mem_addr) _mm_castsi128_ps(_mm_loadu_si128((__m128i const*)(mem_addr))) - #define _mm256_loadu_epi16_to___m256(mem_addr) _mm256_castsi256_ps(_mm256_loadu_si256((__m256i const*)(mem_addr))) - #define _mm512_loadu_epi16_to___m512(mem_addr) _mm512_castsi512_ps(_mm512_loadu_si512((__m512i const*)(mem_addr))) - - #define _mm_loadu_epi32_to___m128(mem_addr) _mm_castsi128_ps(_mm_loadu_si128((__m128i const*)(mem_addr))) - #define _mm256_loadu_epi32_to___m256(mem_addr) _mm256_castsi256_ps(_mm256_loadu_si256((__m256i const*)(mem_addr))) - #define _mm512_loadu_epi32_to___m512(mem_addr) _mm512_castsi512_ps(_mm512_loadu_si512((__m512i const*)(mem_addr))) - - #define _mm_loadu_epi64_to___m128(mem_addr) _mm_castsi128_ps(_mm_loadu_si128((__m128i const*)(mem_addr))) - #define _mm256_loadu_epi64_to___m256(mem_addr) _mm256_castsi256_ps(_mm256_loadu_si256((__m256i const*)(mem_addr))) - #define _mm512_loadu_epi64_to___m512(mem_addr) _mm512_castsi512_ps(_mm512_loadu_si512((__m512i const*)(mem_addr))) - - // T1 is the `To` type, T2 is the `From` type - template T1 cast(T2 x) { - if constexpr ((std::is_integral_v && std::is_integral_v) || (std::is_floating_point_v && std::is_floating_point_v)) { - return x; - } else if constexpr (sizeof(T1) <= sizeof(T2)) { - T1 ret{}; - std::memcpy(&ret, &x, sizeof(T1)); - return ret; - } else { - static_assert(sizeof(T1) == sizeof(T2) || std::is_convertible_v, - "T2 must either be convertible to T1, or have the same size as T1!"); - return T1{}; - 
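Editor's note: the C++ cast<T1, T2> helper deleted above was a memcpy-based bit cast used when loading integer data into float vectors. Its surviving counterpart on the Rust side is the from_bits construction that populate_random now emits ("f{bit_len}::from_bits({src:#x})"); a minimal illustration:

    fn main() {
        // Identical bits, reinterpreted as a float: 0x3f80_0000 is 1.0f32.
        let x = f32::from_bits(0x3f80_0000);
        assert_eq!(x, 1.0);
    }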
} - } -#endif -"#; pub const PLATFORM_C_DEFINITIONS: &str = r#" - -std::ostream& operator<<(std::ostream& os, _Float16 value) { - os << static_cast<float>(value); - return os; -} - -std::ostream& operator<<(std::ostream& os, __m128i value) { - void* temp = malloc(sizeof(__m128i)); - _mm_storeu_si128((__m128i*)temp, value); - std::stringstream ss; - - ss << "0x"; - for(int i = 0; i < 16; i++) { - ss << std::setfill('0') << std::setw(2) << std::hex << ((char*)temp)[i]; - } - os << ss.str(); - return os; -} - -std::ostream& operator<<(std::ostream& os, __m256i value) { - void* temp = malloc(sizeof(__m256i)); - _mm256_storeu_si256((__m256i*)temp, value); - std::stringstream ss; - - ss << "0x"; - for(int i = 0; i < 32; i++) { - ss << std::setfill('0') << std::setw(2) << std::hex << ((char*)temp)[i]; - } - os << ss.str(); - return os; -} - -std::ostream& operator<<(std::ostream& os, __m512i value) { - void* temp = malloc(sizeof(__m512i)); - _mm512_storeu_si512((__m512i*)temp, value); - std::stringstream ss; - - ss << "0x"; - for(int i = 0; i < 64; i++) { - ss << std::setfill('0') << std::setw(2) << std::hex << ((char*)temp)[i]; - } - os << ss.str(); - return os; -} - -std::ostream& operator<<(std::ostream& os, __mmask8 value) { - os << static_cast<unsigned int>(value); - return os; -} "#; pub const PLATFORM_RUST_CFGS: &str = r#" -#![cfg_attr(target_arch = "x86", feature(avx))] -#![cfg_attr(target_arch = "x86", feature(sse))] -#![cfg_attr(target_arch = "x86", feature(sse2))] -#![cfg_attr(target_arch = "x86", feature(stdarch_x86_avx512_bf16))] -#![cfg_attr(target_arch = "x86", feature(stdarch_x86_avx512_f16))] -#![cfg_attr(target_arch = "x86", feature(stdarch_x86_rtm))] -#![cfg_attr(target_arch = "x86", feature(stdarch_x86_rtm))] -#![cfg_attr(target_arch = "x86_64", feature(x86_amx_intrinsics))] -#![cfg_attr(target_arch = "x86_64", feature(stdarch_x86_avx512_f16))] -#![feature(fmt_helpers_for_derive)] +#![feature(stdarch_x86_avx512_bf16)] +#![feature(stdarch_x86_avx512_f16)] +#![feature(stdarch_x86_rtm)] +#![feature(x86_amx_intrinsics)] "#; diff --git a/crates/intrinsic-test/src/x86/constraint.rs b/crates/intrinsic-test/src/x86/constraint.rs index 72f5da3b3f..608ffdd1ee 100644 --- a/crates/intrinsic-test/src/x86/constraint.rs +++ b/crates/intrinsic-test/src/x86/constraint.rs @@ -1,7 +1,10 @@ use crate::common::constraint::Constraint; -pub fn map_constraints(imm_type: &String, imm_width: u32) -> Option<Constraint> { +pub fn map_constraints(fn_name: &str, imm_type: &String, imm_width: u32) -> Option<Constraint> { if imm_width > 0 { + if fn_name == "_mm_sm3rnds2_epi32" { + return Some(Constraint::Set((0..64).step_by(2).collect())); + } let max: i64 = 2i64.pow(imm_width); return Some(Constraint::Range(0..max)); } diff --git a/crates/intrinsic-test/src/x86/mod.rs b/crates/intrinsic-test/src/x86/mod.rs index f2baf07071..5d4798482a 100644 --- a/crates/intrinsic-test/src/x86/mod.rs +++ b/crates/intrinsic-test/src/x86/mod.rs @@ -1,4 +1,3 @@ -mod compile; mod config; mod constraint; mod intrinsic; @@ -7,7 +6,6 @@ mod xml_parser; use crate::common::SupportedArchitectureTest; use crate::common::cli::ProcessedCli; -use crate::common::compile_c::CppCompilation; use crate::common::intrinsic::Intrinsic; use crate::common::intrinsic_helpers::TypeKind; use intrinsic::X86IntrinsicType; @@ -15,33 +13,59 @@ use xml_parser::get_xml_intrinsics; pub struct X86ArchitectureTest { intrinsics: Vec<Intrinsic<X86IntrinsicType>>, - cli_options: ProcessedCli, } impl SupportedArchitectureTest for X86ArchitectureTest { type IntrinsicImpl = X86IntrinsicType; - fn cli_options(&self) -> &ProcessedCli { -
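Editor's note on the map_constraints change above: `_mm_sm3rnds2_epi32` now gets an explicit set instead of the generic 0..2^imm_width range, restricting the tested immediates to even values (presumably the instruction's valid round selectors):

    fn main() {
        // (0..64).step_by(2) enumerates 0, 2, 4, ..., 62: 32 immediates.
        let imms: Vec<i64> = (0..64).step_by(2).collect();
        assert_eq!(imms.len(), 32);
        assert_eq!((imms[0], *imms.last().unwrap()), (0, 62));
    }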
&self.cli_options - } - fn intrinsics(&self) -> &[Intrinsic<X86IntrinsicType>] { &self.intrinsics } - fn cpp_compilation(&self) -> Option<CppCompilation> { - compile::build_cpp_compilation(&self.cli_options) - } - const NOTICE: &str = config::NOTICE; - const PLATFORM_C_HEADERS: &[&str] = &["immintrin.h", "cstddef", "cstdint"]; - const PLATFORM_C_DEFINITIONS: &str = config::PLATFORM_C_DEFINITIONS; - const PLATFORM_C_FORWARD_DECLARATIONS: &str = config::PLATFORM_C_FORWARD_DECLARATIONS; + const PLATFORM_C_HEADERS: &[&str] = &["immintrin.h"]; const PLATFORM_RUST_DEFINITIONS: &str = config::PLATFORM_RUST_DEFINITIONS; const PLATFORM_RUST_CFGS: &str = config::PLATFORM_RUST_CFGS; + fn arch_flags(&self) -> Vec<&str> { + vec![ + "-mavx", + "-mavx2", + "-mavx512f", + "-msse2", + "-mavx512vl", + "-mavx512bw", + "-mavx512dq", + "-mavx512cd", + "-mavx512fp16", + "-msha", + "-msha512", + "-msm3", + "-msm4", + "-mavxvnni", + "-mavxvnniint8", + "-mavxneconvert", + "-mavxifma", + "-mavxvnniint16", + "-mavx512bf16", + "-mavx512bitalg", + "-mavx512ifma", + "-mavx512vbmi", + "-mavx512vbmi2", + "-mavx512vnni", + "-mavx512vpopcntdq", + "-mavx512vp2intersect", + "-mbmi", + "-mbmi2", + "-mgfni", + "-mvaes", + "-mvpclmulqdq", + "-mlzcnt", + ] + } + fn create(cli_options: ProcessedCli) -> Self { let mut intrinsics = get_xml_intrinsics(&cli_options.filename).expect("Error parsing input file"); @@ -67,9 +91,6 @@ impl SupportedArchitectureTest for X86ArchitectureTest { .take(sample_size) .collect::<Vec<_>>(); - Self { - intrinsics: intrinsics, - cli_options: cli_options, - } + Self { intrinsics } } } diff --git a/crates/intrinsic-test/src/x86/types.rs b/crates/intrinsic-test/src/x86/types.rs index 2391ee9c2d..cd7c41e06f 100644 --- a/crates/intrinsic-test/src/x86/types.rs +++ b/crates/intrinsic-test/src/x86/types.rs @@ -1,11 +1,8 @@ use std::str::FromStr; use itertools::Itertools; -use regex::Regex; use super::intrinsic::X86IntrinsicType; -use crate::common::cli::Language; -use crate::common::indentation::Indentation; use crate::common::intrinsic_helpers::{IntrinsicType, IntrinsicTypeDefinition, Sign, TypeKind}; use crate::x86::xml_parser::Parameter; @@ -26,82 +23,16 @@ impl IntrinsicTypeDefinition for X86IntrinsicType { .replace("const ", "") } - fn c_single_vector_type(&self) -> String { - // matches __m128, __m256 and similar types - let re = Regex::new(r"__m\d+").unwrap(); - if re.is_match(self.param.type_data.as_str()) { + fn rust_type(&self) -> String { + if self.is_simd() { self.param.type_data.clone() } else { - unreachable!("Shouldn't be called on this type") + format!("{}{}", self.kind.rust_prefix(), self.inner_size()) } } - // fn rust_type(&self) -> String { - // // handling edge cases first - // // the general handling is implemented below - // if let Some(val) = self.metadata.get("type") { - // match val.as_str() { - // "__m128 const *" => { - // return "&__m128".to_string(); - // } - // "__m128d const *" => { - // return "&__m128d".to_string(); - // } - // "const void*" => { - // return "&__m128d".to_string(); - // } - // _ => {} - // } - // } - - // if self.kind() == TypeKind::Void && self.ptr { - // // this has been handled by default settings in - // // the from_param function of X86IntrinsicType - // unreachable!() - // } - - // // general handling cases - // let core_part = if self.kind() == TypeKind::Mask { - // // all types of __mmask are handled here - // format!("__mask{}", self.bit_len.unwrap()) - // } else if self.simd_len.is_some() { - // // all types of __m vector types are handled here - // let re =
Regex::new(r"\__m\d+[a-z]*").unwrap(); - // let rust_type = self - // .metadata - // .get("type") - // .map(|val| re.find(val).unwrap().as_str()); - // rust_type.unwrap().to_string() - // } else { - // format!( - // "{}{}", - // self.kind.rust_prefix().to_string(), - // self.bit_len.unwrap() - // ) - // }; - - // // extracting "memsize" so that even vector types can be involved - // let memwidth = self - // .metadata - // .get("memwidth") - // .map(|n| str::parse::(n).unwrap()); - // let prefix_part = if self.ptr && self.constant && self.bit_len.eq(&memwidth) { - // "&" - // } else if self.ptr && self.bit_len.eq(&memwidth) { - // "&mut " - // } else if self.ptr && self.constant { - // "*const " - // } else if self.ptr { - // "*mut " - // } else { - // "" - // }; - - // return prefix_part.to_string() + core_part.as_str(); - // } - /// Determines the load function for this type. - fn get_load_function(&self, _language: Language) -> String { + fn get_load_function(&self) -> String { let type_value = self.param.type_data.clone(); if type_value.len() == 0 { unimplemented!("the value for key 'type' is not present!"); @@ -168,82 +99,6 @@ impl IntrinsicTypeDefinition for X86IntrinsicType { } } - /// Generates a std::cout for the intrinsics results that will match the - /// rust debug output format for the return type. The generated line assumes - /// there is an int i in scope which is the current pass number. - fn print_result_c(&self, indentation: Indentation, additional: &str) -> String { - let lanes = if self.num_lanes() > 1 { - (0..self.num_lanes()) - .map(|idx| -> std::string::String { - let cast_type = self.c_promotion(); - let lane_fn = self.get_lane_function(); - if cast_type.len() > 2 { - format!("cast<{cast_type}>({lane_fn}(__return_value, {idx}))") - } else { - format!("{lane_fn}(__return_value, {idx})") - } - }) - .collect::>() - .join(r#" << ", " << "#) - } else { - format!( - "{promote}cast<{cast}>(__return_value)", - cast = match self.kind() { - TypeKind::Void => "void".to_string(), - TypeKind::Float if self.inner_size() == 64 => "double".to_string(), - TypeKind::Float if self.inner_size() == 32 => "float".to_string(), - TypeKind::Mask => format!( - "__mmask{}", - self.bit_len.expect(format!("self: {self:#?}").as_str()) - ), - TypeKind::Vector => format!( - "__m{}i", - self.bit_len.expect(format!("self: {self:#?}").as_str()) - ), - _ => self.c_scalar_type(), - }, - promote = self.generate_final_type_cast(), - ) - }; - - format!( - r#"{indentation}std::cout << "Result {additional}-" << i+1 << ": {ty}" << std::fixed << std::setprecision(150) << {lanes} << "{close}" << std::endl;"#, - ty = if self.is_simd() { - format!("{}(", self.c_type()) - } else { - String::from("") - }, - close = if self.is_simd() { ")" } else { "" }, - ) - } - - /// Determines the get lane function for this type. 
- fn get_lane_function(&self) -> String { - let total_vector_bits: Option = self - .simd_len - .zip(self.bit_len) - .and_then(|(simd_len, bit_len)| Some(simd_len * bit_len)); - - match (self.bit_len, total_vector_bits) { - (Some(8), Some(128)) => String::from("(uint8_t)_mm_extract_epi8"), - (Some(16), Some(128)) => String::from("(uint16_t)_mm_extract_epi16"), - (Some(32), Some(128)) => String::from("(uint32_t)_mm_extract_epi32"), - (Some(64), Some(128)) => String::from("(uint64_t)_mm_extract_epi64"), - (Some(8), Some(256)) => String::from("(uint8_t)_mm256_extract_epi8"), - (Some(16), Some(256)) => String::from("(uint16_t)_mm256_extract_epi16"), - (Some(32), Some(256)) => String::from("(uint32_t)_mm256_extract_epi32"), - (Some(64), Some(256)) => String::from("(uint64_t)_mm256_extract_epi64"), - (Some(8), Some(512)) => String::from("(uint8_t)_mm512_extract_intrinsic_test_epi8"), - (Some(16), Some(512)) => String::from("(uint16_t)_mm512_extract_intrinsic_test_epi16"), - (Some(32), Some(512)) => String::from("(uint32_t)_mm512_extract_intrinsic_test_epi32"), - (Some(64), Some(512)) => String::from("(uint64_t)_mm512_extract_intrinsic_test_epi64"), - _ => unreachable!( - "invalid length for vector argument: {:?}, {:?}", - self.bit_len, self.simd_len - ), - } - } - fn rust_scalar_type(&self) -> String { let prefix = match self.data.kind { TypeKind::Mask => String::from("__mmask"), @@ -258,23 +113,6 @@ impl IntrinsicTypeDefinition for X86IntrinsicType { }; format!("{prefix}{bits}") } - - fn print_result_rust(&self) -> String { - let return_value = match self.kind() { - // `_mm{256}_cvtps_ph` has return type __m128i but contains f16 values - TypeKind::Float if self.param.type_data == "__m128i" => { - "format_args!(\"{:.150?}\", debug_as::<_, f16>(__return_value))".to_string() - } - TypeKind::Int(_) - if ["__m128i", "__m256i", "__m512i"].contains(&self.param.type_data.as_str()) => - { - format!("debug_as::<_, u{}>(__return_value)", self.inner_size()) - } - _ => "format_args!(\"{__return_value:.150?}\")".to_string(), - }; - - return_value - } } impl X86IntrinsicType { diff --git a/crates/intrinsic-test/src/x86/xml_parser.rs b/crates/intrinsic-test/src/x86/xml_parser.rs index 681b1a3c52..6006d7919f 100644 --- a/crates/intrinsic-test/src/x86/xml_parser.rs +++ b/crates/intrinsic-test/src/x86/xml_parser.rs @@ -99,7 +99,7 @@ fn xml_to_intrinsic( } else { param.imm_width }; - let constraint = map_constraints(¶m.imm_type, effective_imm_width); + let constraint = map_constraints(&name, ¶m.imm_type, effective_imm_width); let arg = Argument::::new( i, param.var_name.clone(), diff --git a/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index 2f7f2fc2b0..03fea5e0a4 100644 --- a/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -2274,6 +2274,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-stable static_defs: ['const N: i32'] + big_endian_inverse: true safety: safe types: - poly64x2_t @@ -2291,6 +2292,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] - *neon-stable static_defs: ['const N: i32'] + big_endian_inverse: true safety: safe types: - [poly64x1_t, poly64x2_t] @@ -2482,6 +2484,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['2']] - *neon-stable static_defs: ['const N: i32'] + big_endian_inverse: true safety: safe types: - poly64x2_t @@ -5705,6 +5708,7 @@ intrinsics: - *neon-unstable-fcma assert_instr: [fcadd] safety: safe + big_endian_inverse: true 
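Editor's note on the many `big_endian_inverse: true` additions in this spec: as I read stdarch-gen-arm, the flag tells the generator to emit a big-endian variant of the intrinsic that reverses vector lane order around the operation, matching the reversal arrays (e.g. '[3, 2, 1, 0]') that appear elsewhere in these files; treat that reading as an assumption, not documentation. The reversal itself is a plain index permutation:

    // Illustrative only: lane reversal for a 4-lane vector.
    fn reverse_lanes(v: [f32; 4]) -> [f32; 4] {
        [v[3], v[2], v[1], v[0]]
    }

    fn main() {
        assert_eq!(reverse_lanes([0.0, 1.0, 2.0, 3.0]), [3.0, 2.0, 1.0, 0.0]);
    }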
types: - float32x2_t - float32x4_t @@ -5725,6 +5729,7 @@ intrinsics: - *neon-unstable-fcma assert_instr: [fcadd] safety: safe + big_endian_inverse: true types: - float32x2_t - float32x4_t @@ -5747,6 +5752,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fcadd] safety: safe + big_endian_inverse: true types: - float16x4_t - float16x8_t @@ -5768,6 +5774,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fcadd] safety: safe + big_endian_inverse: true types: - float16x4_t - float16x8_t @@ -5787,6 +5794,7 @@ intrinsics: - *neon-unstable-fcma assert_instr: [fcmla] safety: safe + big_endian_inverse: true types: - float32x2_t - float32x4_t @@ -5809,6 +5817,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fcmla] safety: safe + big_endian_inverse: true types: - float16x4_t - float16x8_t @@ -5828,6 +5837,7 @@ intrinsics: - *neon-unstable-fcma assert_instr: [fcmla] safety: safe + big_endian_inverse: true types: - float32x2_t - float32x4_t @@ -5850,6 +5860,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fcmla] safety: safe + big_endian_inverse: true types: - float16x4_t - float16x8_t @@ -5869,6 +5880,7 @@ intrinsics: - *neon-unstable-fcma assert_instr: [fcmla] safety: safe + big_endian_inverse: true types: - float32x2_t - float32x4_t @@ -5892,6 +5904,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fcmla] safety: safe + big_endian_inverse: true types: - float16x4_t - float16x8_t @@ -6043,6 +6056,7 @@ intrinsics: - *neon-unstable-fcma assert_instr: [fcmla] safety: safe + big_endian_inverse: true types: - float32x2_t - float32x4_t @@ -6066,6 +6080,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fcmla] safety: safe + big_endian_inverse: true types: - float16x4_t - float16x8_t @@ -6661,6 +6676,7 @@ intrinsics: attr: [*neon-stable] assert_instr: [faddp] safety: safe + big_endian_inverse: true types: - [float32x4_t, "4"] - [float64x2_t, "2"] @@ -6683,6 +6699,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [faddp] safety: safe + big_endian_inverse: true types: - [float16x8_t, "8"] compose: @@ -6704,6 +6721,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fmaxp] safety: safe + big_endian_inverse: true types: - float16x4_t - float16x8_t @@ -6725,6 +6743,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fmaxnmp] safety: safe + big_endian_inverse: true types: - float16x4_t - float16x8_t @@ -6746,6 +6765,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fminp] safety: safe + big_endian_inverse: true types: - float16x4_t - float16x8_t @@ -6767,6 +6787,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fminnmp] safety: safe + big_endian_inverse: true types: - float16x4_t - float16x8_t @@ -6806,6 +6827,7 @@ intrinsics: attr: [*neon-stable] assert_instr: [fminp] safety: safe + big_endian_inverse: true types: - ["s_f32", float32x2_t, f32] - ["qd_f64", float64x2_t, f64] @@ -7061,7 +7083,7 @@ intrinsics: arguments: ["a: {type[0]}", "b: {type[1]}", "c: {type[1]}"] return_type: "{type[0]}" attr: - - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sqdmlal]]}]] + - FnCall: [cfg_attr, [*all-test-little-endian, {FnCall: [assert_instr, [sqdmlal]]}]] - *neon-stable safety: safe types: @@ -7089,7 +7111,7 @@ intrinsics: arguments: ["a: {type[0]}", "b: {type[1]}", "c: {neon_type[2]}"] return_type: "{type[0]}" attr: - - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sqdmlal, 'LANE = 0']]}]] + - FnCall: [cfg_attr, [*all-test-little-endian, {FnCall: [assert_instr, [sqdmlal, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-stable static_defs: ['const LANE: 
i32'] @@ -7175,7 +7197,7 @@ intrinsics: arguments: ["a: {type[0]}", "b: {type[1]}", "c: {type[1]}"] return_type: "{type[0]}" attr: - - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sqdmlsl]]}]] + - FnCall: [cfg_attr, [*all-test-little-endian, {FnCall: [assert_instr, [sqdmlsl]]}]] - *neon-stable safety: safe types: @@ -7203,7 +7225,7 @@ intrinsics: arguments: ["a: {type[0]}", "b: {type[1]}", "c: {neon_type[2]}"] return_type: "{type[0]}" attr: - - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sqdmlsl, 'LANE = 0']]}]] + - FnCall: [cfg_attr, [*all-test-little-endian, {FnCall: [assert_instr, [sqdmlsl, 'LANE = 0']]}]] - FnCall: [rustc_legacy_const_generics, ['3']] - *neon-stable static_defs: ['const LANE: i32'] @@ -8455,7 +8477,6 @@ intrinsics: attr: [*neon-stable] assert_instr: [nop] safety: safe - big_endian_inverse: true types: - [float64x1_t, int8x8_t] - [float64x1_t, int16x4_t] @@ -8512,7 +8533,6 @@ intrinsics: - *target-not-arm64ec assert_instr: [nop] safety: safe - big_endian_inverse: true types: - [float64x1_t, float16x4_t] - [float16x4_t, float64x1_t] @@ -8779,6 +8799,7 @@ intrinsics: - *neon-stable static_defs: ['const LANE1: i32, const LANE2: i32'] safety: safe + big_endian_inverse: true types: - [poly64x2_t, poly64x2_t, poly64x2_t, '1', '1'] compose: @@ -8841,6 +8862,7 @@ intrinsics: - *neon-stable static_defs: ['const LANE1: i32, const LANE2: i32'] safety: safe + big_endian_inverse: true types: - [poly64x2_t, poly64x1_t] compose: @@ -8874,7 +8896,6 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [nop]]}]] - *neon-stable safety: safe - big_endian_inverse: true types: - ["u64", float64x1_t] compose: @@ -8890,6 +8911,7 @@ intrinsics: - *neon-stable static_defs: ['const LANE: i32'] safety: safe + big_endian_inverse: true types: - ["f64", float64x1_t, float64x1_t] compose: @@ -8906,6 +8928,7 @@ intrinsics: - *neon-stable static_defs: ['const LANE: i32'] safety: safe + big_endian_inverse: true types: - ["f64", float64x2_t, float64x2_t] compose: @@ -9019,6 +9042,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sm3partw1]]}]] - FnCall: [unstable, ['feature = "stdarch_neon_sm4"', 'issue = "117226"']] safety: safe + big_endian_inverse: true types: - uint32x4_t compose: @@ -9037,6 +9061,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sm3partw2]]}]] - FnCall: [unstable, ['feature = "stdarch_neon_sm4"', 'issue = "117226"']] safety: safe + big_endian_inverse: true types: - uint32x4_t compose: @@ -9055,6 +9080,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sm3ss1]]}]] - FnCall: [unstable, ['feature = "stdarch_neon_sm4"', 'issue = "117226"']] safety: safe + big_endian_inverse: true types: - uint32x4_t compose: @@ -9073,6 +9099,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sm4ekey]]}]] - FnCall: [unstable, ['feature = "stdarch_neon_sm4"', 'issue = "117226"']] safety: safe + big_endian_inverse: true types: - uint32x4_t compose: @@ -9091,6 +9118,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sm4e]]}]] - FnCall: [unstable, ['feature = "stdarch_neon_sm4"', 'issue = "117226"']] safety: safe + big_endian_inverse: true types: - uint32x4_t compose: @@ -9127,6 +9155,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sha512h]]}]] - FnCall: [stable, ['feature = "stdarch_neon_sha3"', 'since = "1.79.0"']] safety: safe + big_endian_inverse: true types: - uint64x2_t compose: @@ -9145,6 +9174,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: 
[assert_instr, [sha512h2]]}]] - FnCall: [stable, ['feature = "stdarch_neon_sha3"', 'since = "1.79.0"']] safety: safe + big_endian_inverse: true types: - uint64x2_t compose: @@ -9163,6 +9193,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sha512su0]]}]] - FnCall: [stable, ['feature = "stdarch_neon_sha3"', 'since = "1.79.0"']] safety: safe + big_endian_inverse: true types: - uint64x2_t compose: @@ -9181,6 +9212,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [sha512su1]]}]] - FnCall: [stable, ['feature = "stdarch_neon_sha3"', 'since = "1.79.0"']] safety: safe + big_endian_inverse: true types: - uint64x2_t compose: @@ -9201,6 +9233,7 @@ intrinsics: - FnCall: [unstable, ['feature = "stdarch_neon_sm4"', 'issue = "117226"']] static_defs: ["const IMM2: i32"] safety: safe + big_endian_inverse: true types: - ['1aq_u32', uint32x4_t, 'sm3tt1a', 'SM3TT1A'] - ['1bq_u32', uint32x4_t, 'sm3tt1b', 'SM3TT1B'] @@ -9445,6 +9478,7 @@ intrinsics: - *neon-stable - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [trn1]]}]] safety: safe + big_endian_inverse: true types: - [int8x8_t, '[0, 8, 2, 10, 4, 12, 6, 14]'] - [int8x16_t, '[0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]'] @@ -9475,6 +9509,7 @@ intrinsics: - *target-not-arm64ec - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [trn1]]}]] safety: safe + big_endian_inverse: true types: - [float16x4_t, '[0, 4, 2, 6]'] - [float16x8_t, '[0, 8, 2, 10, 4, 12, 6, 14]'] @@ -9489,6 +9524,7 @@ intrinsics: - *neon-stable - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [zip1]]}]] safety: safe + big_endian_inverse: true types: - [int32x2_t, '[0, 2]'] - [int64x2_t, '[0, 2]'] @@ -9508,6 +9544,7 @@ intrinsics: - *neon-stable - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [trn2]]}]] safety: safe + big_endian_inverse: true types: - [int8x8_t, '[1, 9, 3, 11, 5, 13, 7, 15]'] - [int8x16_t, '[1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]'] @@ -9537,6 +9574,7 @@ intrinsics: - *target-not-arm64ec - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [trn2]]}]] safety: safe + big_endian_inverse: true types: - [float16x4_t, '[1, 5, 3, 7]'] - [float16x8_t, '[1, 9, 3, 11, 5, 13, 7, 15]'] @@ -9551,6 +9589,7 @@ intrinsics: - *neon-stable - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [zip2]]}]] safety: safe + big_endian_inverse: true types: - [int32x2_t, '[1, 3]'] - [int64x2_t, '[1, 3]'] @@ -9570,6 +9609,7 @@ intrinsics: - *neon-stable - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [zip2]]}]] safety: safe + big_endian_inverse: true types: - [int8x8_t, '[4, 12, 5, 13, 6, 14, 7, 15]'] - [int8x16_t, '[8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]'] @@ -9606,6 +9646,7 @@ intrinsics: - *target-not-arm64ec - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [zip2]]}]] safety: safe + big_endian_inverse: true types: - [float16x4_t, '[2, 6, 3, 7]'] - [float16x8_t, '[4, 12, 5, 13, 6, 14, 7, 15]'] @@ -9620,6 +9661,7 @@ intrinsics: - *neon-stable - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [zip1]]}]] safety: safe + big_endian_inverse: true types: - [int8x8_t, '[0, 8, 1, 9, 2, 10, 3, 11]'] - [int8x16_t, '[0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]'] @@ -9657,6 +9699,7 @@ intrinsics: - *target-not-arm64ec - FnCall: [cfg_attr, 
[*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [zip1]]}]] safety: safe + big_endian_inverse: true types: - [float16x4_t, '[0, 4, 1, 5]'] - [float16x8_t, '[0, 8, 1, 9, 2, 10, 3, 11]'] @@ -9671,6 +9714,7 @@ intrinsics: - *neon-stable - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [zip1]]}]] safety: safe + big_endian_inverse: true types: - [int32x2_t, '[0, 2]'] - [int64x2_t, '[0, 2]'] @@ -9690,6 +9734,7 @@ intrinsics: - *neon-stable - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [uzp1]]}]] safety: safe + big_endian_inverse: true types: - [int8x8_t, '[0, 2, 4, 6, 8, 10, 12, 14]'] - [int8x16_t, '[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]'] @@ -9719,6 +9764,7 @@ intrinsics: - *target-not-arm64ec - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [uzp1]]}]] safety: safe + big_endian_inverse: true types: - [float16x4_t, '[0, 2, 4, 6]'] - [float16x8_t, '[0, 2, 4, 6, 8, 10, 12, 14]'] @@ -9733,6 +9779,7 @@ intrinsics: - *neon-stable - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [zip2]]}]] safety: safe + big_endian_inverse: true types: - [int32x2_t, '[1, 3]'] - [int64x2_t, '[1, 3]'] @@ -9752,6 +9799,7 @@ intrinsics: - *neon-stable - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [uzp2]]}]] safety: safe + big_endian_inverse: true types: - [int8x8_t, '[1, 3, 5, 7, 9, 11, 13, 15]'] - [int8x16_t, '[1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]'] @@ -9785,6 +9833,7 @@ intrinsics: - *target-not-arm64ec - FnCall: [cfg_attr, [*cfg-test-not-msvc-little-endian, {FnCall: [assert_instr, [uzp2]]}]] safety: safe + big_endian_inverse: true types: - [float16x4_t, '[1, 3, 5, 7]'] - [float16x8_t, '[1, 3, 5, 7, 9, 11, 13, 15]'] @@ -9942,6 +9991,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fmaxnmp]]}]] - *neon-stable safety: safe + big_endian_inverse: true types: - float32x2_t - float64x2_t @@ -11326,6 +11376,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fminnmp]]}]] - *neon-stable safety: safe + big_endian_inverse: true types: - ['s_f32', float32x2_t, "f32"] - ['qd_f64', float64x2_t, "f64"] @@ -11344,6 +11395,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fmaxnmp]]}]] - *neon-stable safety: safe + big_endian_inverse: true types: - ['s_f32', float32x2_t, "f32"] - ['qd_f64', float64x2_t, "f64"] @@ -11575,6 +11627,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [fminnmp]]}]] - *neon-stable safety: safe + big_endian_inverse: true types: - float32x2_t - float64x2_t @@ -12089,6 +12142,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] - *neon-stable safety: safe + big_endian_inverse: true types: - [int8x8_t, int8x16_t, uint8x8_t, vqtbx1] - [int8x16_t, int8x16_t, uint8x16_t, vqtbx1q] @@ -12108,6 +12162,7 @@ intrinsics: - [poly8x8_t, "poly8x16_t", uint8x8_t, "vqtbx1", "_p8"] - [uint8x16_t, "uint8x16_t", uint8x16_t, "vqtbx1q", "q_u8"] - [poly8x16_t, "poly8x16_t", uint8x16_t, "vqtbx1q", "q_p8"] + big_endian_inverse: true compose: - FnCall: - transmute @@ -12284,6 +12339,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - *neon-stable safety: safe + big_endian_inverse: true types: - ['int8x16_t', uint8x8_t, 'vqtbl1', 'int8x8_t'] - ['int8x16_t', uint8x16_t, 'vqtbl1q', 'int8x16_t'] @@ -12303,6 +12359,7 @@ intrinsics: - ['poly8x16_t', uint8x8_t, 'vqtbl1', 'poly8x8_t'] - ['uint8x16_t', 
uint8x16_t, 'vqtbl1q', 'uint8x16_t'] - ['poly8x16_t', uint8x16_t, 'vqtbl1q', 'poly8x16_t'] + big_endian_inverse: true compose: - FnCall: - transmute @@ -12319,6 +12376,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - *neon-stable safety: safe + big_endian_inverse: true types: - ['int8x16x2_t', uint8x8_t, 'vqtbl2', 'int8x8_t'] - ['int8x16x2_t', uint8x16_t, 'vqtbl2q', 'int8x16_t'] @@ -12332,6 +12390,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - *neon-stable + big_endian_inverse: true safety: safe types: - ['uint8x16x2_t', uint8x8_t, 'vqtbl2', 'uint8x8_t'] @@ -12355,6 +12414,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] - *neon-stable safety: safe + big_endian_inverse: true types: - [int8x8_t, 'int8x16x2_t', uint8x8_t, 'vqtbx2'] - [int8x16_t, 'int8x16x2_t', uint8x16_t, 'vqtbx2q'] @@ -12368,6 +12428,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] - *neon-stable + big_endian_inverse: true safety: safe types: - [uint8x8_t, 'uint8x16x2_t', uint8x8_t, 'vqtbx2'] @@ -12391,6 +12452,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - *neon-stable + big_endian_inverse: true safety: safe types: - ['int8x8_t', 'int8x16x3_t', uint8x8_t, 'vqtbl3'] @@ -12405,6 +12467,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - *neon-stable + big_endian_inverse: true safety: safe types: - ['uint8x8_t', 'uint8x16x3_t', uint8x8_t, 'vqtbl3'] @@ -12429,6 +12492,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] - *neon-stable safety: safe + big_endian_inverse: true types: - [int8x8_t, 'int8x16x3_t', uint8x8_t, 'vqtbx3'] - [int8x16_t, 'int8x16x3_t', uint8x16_t, 'vqtbx3q'] @@ -12442,6 +12506,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] - *neon-stable + big_endian_inverse: true safety: safe types: - [uint8x8_t, 'uint8x16x3_t', uint8x8_t, 'vqtbx3'] @@ -12466,6 +12531,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - *neon-stable + big_endian_inverse: true safety: safe types: - ['int8x16x4_t', uint8x8_t, 'vqtbl4', 'int8x8_t'] @@ -12480,6 +12546,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbl]]}]] - *neon-stable + big_endian_inverse: true safety: safe types: - ['uint8x16x4_t', uint8x8_t, 'vqtbl4', 'uint8x8_t'] @@ -12505,6 +12572,7 @@ intrinsics: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] - *neon-stable safety: safe + big_endian_inverse: true types: - [int8x8_t, 'int8x16x4_t', uint8x8_t, 'vqtbx4'] - [int8x16_t, 'int8x16x4_t', uint8x16_t, 'vqtbx4q'] @@ -12518,6 +12586,7 @@ intrinsics: attr: - FnCall: [cfg_attr, [test, {FnCall: [assert_instr, [tbx]]}]] - *neon-stable + big_endian_inverse: true safety: safe types: - [uint8x8_t, 'uint8x16x4_t', uint8x8_t, 'vqtbx4'] @@ -12964,6 +13033,7 @@ intrinsics: - *neon-stable assert_instr: [addp] safety: safe + big_endian_inverse: true types: - [int8x16_t, "16"] - [int16x8_t, "8"] @@ -13252,6 +13322,7 @@ intrinsics: - *neon-stable assert_instr: ['sminp'] safety: safe + big_endian_inverse: true types: - int8x16_t - int16x8_t @@ -13271,6 +13342,7 @@ intrinsics: - *neon-stable assert_instr: ['uminp'] safety: safe + big_endian_inverse: true types: - uint8x16_t - uint16x8_t @@ -13290,6 +13362,7 @@ intrinsics: - *neon-stable assert_instr: ['fminp'] safety: safe + big_endian_inverse: true types: - float32x4_t - float64x2_t @@ -13308,6 
+13381,7 @@ intrinsics: - *neon-stable assert_instr: ['smaxp'] safety: safe + big_endian_inverse: true types: - int8x16_t - int16x8_t @@ -13327,6 +13401,7 @@ intrinsics: - *neon-stable assert_instr: ['umaxp'] safety: safe + big_endian_inverse: true types: - uint8x16_t - uint16x8_t @@ -13346,6 +13421,7 @@ intrinsics: - *neon-stable assert_instr: ['fmaxp'] safety: safe + big_endian_inverse: true types: - float32x4_t - float64x2_t @@ -13500,6 +13576,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fmlal2] safety: safe + big_endian_inverse: true types: - [float32x2_t, float16x4_t, '_high_'] - [float32x4_t, float16x8_t, 'q_high_'] @@ -13548,6 +13625,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fmlal] safety: safe + big_endian_inverse: true types: - [float32x2_t, float16x4_t, '_low_'] - [float32x4_t, float16x8_t, 'q_low_'] @@ -13596,6 +13674,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fmlsl2] safety: safe + big_endian_inverse: true types: - [float32x2_t, float16x4_t, '_high_'] - [float32x4_t, float16x8_t, 'q_high_'] @@ -13643,6 +13722,7 @@ intrinsics: - *target-not-arm64ec assert_instr: [fmlsl] safety: safe + big_endian_inverse: true types: - [float32x2_t, float16x4_t, '_low_'] - [float32x4_t, float16x8_t, 'q_low_'] @@ -14104,8 +14184,9 @@ intrinsics: - *neon-stable assert_instr: ['{type[3]}'] safety: safe + big_endian_inverse: true types: - - ['vget_high_f64', 'float64x2_t', 'float64x1_t', 'fmov', 'float64x1_t([simd_extract!(a, 1)])'] + - ['vget_high_f64', 'float64x2_t', 'float64x1_t', 'nop', 'float64x1_t([simd_extract!(a, 1)])'] - ['vget_low_f64', 'float64x2_t', 'float64x1_t', 'nop', 'float64x1_t([simd_extract!(a, 0)])'] compose: - Identifier: ['{type[4]}', UnsafeSymbol] @@ -14118,6 +14199,7 @@ intrinsics: - *neon-stable assert_instr: [mov] safety: safe + big_endian_inverse: true types: - [float64x1_t, float64x2_t, '[0, 1]'] compose: @@ -14132,6 +14214,7 @@ intrinsics: - FnCall: [rustc_legacy_const_generics, ['1']] assert_instr: [['nop', 'IMM5 = 0']] safety: safe + big_endian_inverse: true static_defs: ["const IMM5: i32"] types: - ['float64x2_t', 'f64'] diff --git a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index 972d838f42..e8682cf45f 100644 --- a/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -1442,6 +1442,7 @@ intrinsics: - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe + big_endian_inverse: true types: - [_lane_s8, int8x8_t, int8x8_t, '3', '[N as u32; 8]'] - [q_lane_s8, int8x8_t, int8x16_t, '3', '[N as u32; 16]'] @@ -1466,6 +1467,7 @@ intrinsics: - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe + big_endian_inverse: true types: - [q_laneq_s8, int8x16_t, int8x16_t, '4', '[N as u32; 16]'] - [_laneq_s8, int8x16_t, int8x8_t, '4', '[N as u32; 8]'] @@ -1490,6 +1492,7 @@ intrinsics: - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe + big_endian_inverse: true types: - [_lane_s16, int16x4_t, int16x4_t, '2', '[N as u32; 4]'] - [q_lane_s16, int16x4_t, int16x8_t, '2', '[N as u32; 8]'] @@ -1514,6 +1517,7 @@ intrinsics: - *neon-cfg-arm-unstable static_defs: ['const N: i32'] safety: safe + big_endian_inverse: true types: - [q_laneq_s16, int16x8_t, int16x8_t, '3', '[N as u32; 8]'] - [_laneq_s16, int16x8_t, int16x4_t, '3', '[N as u32; 4]'] @@ -1541,6 +1545,7 @@ intrinsics: - *target-not-arm64ec static_defs: ['const N: i32'] safety: safe + big_endian_inverse: true types: - [q_laneq_f16, 
float16x8_t, float16x8_t, '3', '[N as u32; 8]']
       - [_laneq_f16, float16x8_t, float16x4_t, '3', '[N as u32; 4]']
@@ -1581,6 +1586,7 @@ intrinsics:
       - *target-not-arm64ec
     static_defs: ['const N: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - [_lane_f16, float16x4_t, float16x4_t, '2', '[N as u32; 4]']
       - [q_lane_f16, float16x4_t, float16x8_t, '2', '[N as u32; 8]']
@@ -1602,6 +1608,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const N: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - [_lane_s32, int32x2_t, int32x2_t, '1', '[N as u32, N as u32]']
       - [q_lane_s32, int32x2_t, int32x4_t, '1', '[N as u32, N as u32, N as u32, N as u32]']
@@ -1626,6 +1633,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const N: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - [q_laneq_s32, int32x4_t, int32x4_t, '2', '[N as u32, N as u32, N as u32, N as u32]']
       - [_laneq_s32, int32x4_t, int32x2_t, '2', '[N as u32, N as u32]']
@@ -1650,6 +1658,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const N: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - [q_laneq_s64, int64x2_t, '1', '[N as u32, N as u32]']
       - [q_laneq_u64, uint64x2_t, '1', '[N as u32, N as u32]']
@@ -1670,6 +1679,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const N: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - [q_lane_s64, int64x1_t, int64x2_t]
       - [q_lane_u64, uint64x1_t, uint64x2_t]
@@ -1732,6 +1742,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const N: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - int8x8_t
       - int16x8_t
@@ -1756,6 +1767,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const N: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - int8x16_t
       - uint8x16_t
@@ -1777,6 +1789,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const N: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - int16x4_t
       - int32x4_t
@@ -1804,6 +1817,7 @@ intrinsics:
       - *target-not-arm64ec
     static_defs: ['const N: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - float16x4_t
     compose:
@@ -1825,6 +1839,7 @@ intrinsics:
       - *target-not-arm64ec
     static_defs: ['const N: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - float16x8_t
     compose:
@@ -1846,6 +1861,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const N: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - int32x2_t
       - uint32x2_t
@@ -1867,6 +1883,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const N: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - int64x2_t
       - uint64x2_t
@@ -6138,6 +6155,7 @@ intrinsics:
       - *neon-unstable-i8mm
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - [int32x2_t, uint8x8_t, int8x8_t]
       - [int32x4_t, uint8x16_t, int8x16_t]
@@ -7076,6 +7094,7 @@ intrinsics:
       - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [unstable, ['feature = "stdarch_neon_dotprod"', 'issue = "117224"']]}]]
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - [int32x2_t, int8x8_t]
       - [int32x4_t, int8x16_t]
@@ -7100,6 +7119,7 @@ intrinsics:
       - FnCall: [cfg_attr, [{FnCall: [not, ['target_arch = "arm"']]}, {FnCall: [unstable, ['feature = "stdarch_neon_dotprod"', 'issue = "117224"']]}]]
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - [uint32x2_t, uint8x8_t]
       - [uint32x4_t, uint8x16_t]
@@ -7473,6 +7493,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - float32x2_t
     compose:
@@ -7497,6 +7518,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - *target-not-arm64ec
     safety: safe
+    big_endian_inverse: true
     types:
       - float16x4_t
     compose:
@@ -8494,7 +8516,6 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
-    big_endian_inverse: true
     types:
       - [poly64x1_t, int32x2_t]
       - [poly64x1_t, uint32x2_t]
@@ -8790,7 +8811,6 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - *target-not-arm64ec
     safety: safe
-    big_endian_inverse: true
     types:
       # non-q
       - [float32x2_t, float16x4_t]
@@ -8873,7 +8893,6 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - *target-not-arm64ec
     safety: safe
-    big_endian_inverse: true
     types:
       - [poly64x1_t, float16x4_t]
       - [float16x4_t, poly64x1_t]
@@ -8898,6 +8917,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - *target-not-arm64ec
     safety: safe
+    big_endian_inverse: true
     types:
       - [float16x4_t, "[3, 2, 1, 0]"]
       - [float16x8_t, "[3, 2, 1, 0, 7, 6, 5, 4]"]
@@ -9187,7 +9207,6 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
-    big_endian_inverse: true
     types:
       - ["u64", int8x8_t]
       - ["u64", int16x4_t]
@@ -9216,7 +9235,6 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - *target-not-arm64ec
     safety: safe
-    big_endian_inverse: true
     types:
       - ["u64", float16x4_t]
     compose:
@@ -9234,7 +9252,6 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
-    big_endian_inverse: true
     types:
       - ["u64", poly64x1_t]
     compose:
@@ -9253,6 +9270,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const LANE: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - ["i8", int8x8_t, '3']
       - ["i16", int16x4_t, '2']
@@ -9293,6 +9311,7 @@ intrinsics:
       - *target-not-arm64ec
     static_defs: ['const LANE: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - ["f16", float16x4_t, '2']
       - ["f16", float16x8_t, '3']
@@ -9314,6 +9333,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const LANE: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - ["i64", int64x1_t, int64x1_t]
       - ["u64", uint64x1_t, uint64x1_t]
@@ -9335,6 +9355,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const LANE: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - ["p64", poly64x1_t, poly64x1_t]
     compose:
@@ -9355,6 +9376,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
     static_defs: ['const LANE: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - ["p64", poly64x2_t, poly64x2_t]
     compose:
@@ -9681,6 +9703,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - [int8x8_t, int8x8x2_t, '[0, 8, 2, 10, 4, 12, 6, 14]', '[1, 9, 3, 11, 5, 13, 7, 15]']
       - [int16x4_t, int16x4x2_t, '[0, 4, 2, 6]', '[1, 5, 3, 7]']
@@ -9725,6 +9748,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - *target-not-arm64ec
     safety: safe
+    big_endian_inverse: true
     types:
       - [float16x4_t, float16x4x2_t, '[0, 4, 2, 6]', '[1, 5, 3, 7]']
       - [float16x8_t, float16x8x2_t, '[0, 8, 2, 10, 4, 12, 6, 14]', '[1, 9, 3, 11, 5, 13, 7, 15]']
@@ -9754,6 +9778,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - [int32x2_t, int32x2x2_t, '[0, 2]', '[1, 3]']
       - [uint32x2_t, uint32x2x2_t, '[0, 2]', '[1, 3]']
@@ -9783,6 +9808,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - [int8x16_t, int8x16x2_t, '[0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]', '[8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]']
       - [int16x8_t, int16x8x2_t, '[0, 8, 1, 9, 2, 10, 3, 11]', '[4, 12, 5, 13, 6, 14, 7, 15]']
@@ -9818,6 +9844,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - [int32x2_t, int32x2x2_t, '[0, 2]', '[1, 3]']
       - [uint32x2_t, uint32x2x2_t, '[0, 2]', '[1, 3]']
@@ -9847,6 +9874,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - [int8x8_t, int8x8x2_t, '[0, 8, 1, 9, 2, 10, 3, 11]', '[4, 12, 5, 13, 6, 14, 7, 15]']
       - [int16x4_t, int16x4x2_t, '[0, 4, 1, 5]', '[2, 6, 3, 7]']
@@ -9882,6 +9910,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - *target-not-arm64ec
     safety: safe
+    big_endian_inverse: true
     types:
       - [float16x4_t, float16x4x2_t, '[0, 4, 1, 5]', '[2, 6, 3, 7]']
       - [float16x8_t, float16x8x2_t, '[0, 8, 1, 9, 2, 10, 3, 11]', '[4, 12, 5, 13, 6, 14, 7, 15]']
@@ -9910,6 +9939,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - [int8x8_t, int8x8x2_t, '[0, 2, 4, 6, 8, 10, 12, 14]', '[1, 3, 5, 7, 9, 11, 13, 15]']
       - [int16x4_t, int16x4x2_t, '[0, 2, 4, 6]', '[1, 3, 5, 7]']
@@ -9954,6 +9984,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - *target-not-arm64ec
     safety: safe
+    big_endian_inverse: true
     types:
       - [float16x4_t, float16x4x2_t, '[0, 2, 4, 6]', '[1, 3, 5, 7]']
       - [float16x8_t, float16x8x2_t, '[0, 2, 4, 6, 8, 10, 12, 14]', '[1, 3, 5, 7, 9, 11, 13, 15]']
@@ -9983,6 +10014,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - [float32x2_t, float32x2x2_t, '[0, 2]', '[1, 3]']
       - [int32x2_t, int32x2x2_t, '[0, 2]', '[1, 3]']
@@ -12502,6 +12534,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - [float32x2_t, float32x4_t, '[0, 1, 2, 3]']
       - [poly8x8_t, poly8x16_t, '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]']
@@ -12529,6 +12562,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - uint8x16_t
     compose:
@@ -12551,6 +12585,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - uint8x16_t
     compose:
@@ -12573,6 +12608,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - [uint8x16_t, "aesmc"]
     compose:
@@ -12595,6 +12631,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - [uint8x16_t, "aesimc"]
     compose:
@@ -12617,6 +12654,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - [u32, "sha1h"]
     compose:
@@ -12639,6 +12677,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - [u32, "sha1c", "uint32x4_t"]
     compose:
@@ -12661,6 +12700,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - [u32, "sha1m", "uint32x4_t"]
     compose:
@@ -12683,6 +12723,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - [u32, "sha1p", "uint32x4_t"]
     compose:
@@ -12705,6 +12746,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - [uint32x4_t, "sha1su0"]
     compose:
@@ -12727,6 +12769,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - [uint32x4_t, "sha1su1"]
     compose:
@@ -12749,6 +12792,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - [uint32x4_t, "sha256h"]
     compose:
@@ -12771,6 +12815,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - [uint32x4_t, "sha256h2"]
     compose:
@@ -12793,6 +12838,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
      - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - [uint32x4_t, "sha256su0"]
     compose:
@@ -12815,6 +12861,7 @@ intrinsics:
       - *neon-cfg-arm-unstable
       - FnCall: [cfg_attr, [*not-arm, { FnCall: [stable, ['feature = "aarch64_neon_crypto_intrinsics"', 'since = "1.72.0"']] }]]
     safety: safe
+    big_endian_inverse: true
     types:
       - [uint32x4_t, "sha256su1"]
     compose:
@@ -13052,6 +13099,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - int8x8_t
       - int16x4_t
@@ -13076,6 +13124,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - uint8x8_t
       - uint16x4_t
@@ -13100,6 +13149,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - float32x2_t
     compose:
@@ -13122,6 +13172,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - int8x8_t
       - int16x4_t
@@ -13146,6 +13197,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - uint8x8_t
       - uint16x4_t
@@ -13170,6 +13222,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - float32x2_t
     compose:
@@ -13290,6 +13343,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - int8x8_t
       - int16x4_t
@@ -14140,6 +14194,7 @@ intrinsics:
       - *target-not-arm64ec
     assert_instr: [nop]
     safety: safe
+    big_endian_inverse: true
     types:
       - [float16x4_t, float16x8_t]
     compose:
@@ -14157,6 +14212,7 @@ intrinsics:
       - *target-not-arm64ec
     assert_instr: [nop]
     safety: safe
+    big_endian_inverse: true
     types:
       - [float16x4_t, float16x8_t, 'low', "[0, 1, 2, 3]"]
       - [float16x4_t, float16x8_t, 'high', "[4, 5, 6, 7]"]
@@ -14177,6 +14233,7 @@ intrinsics:
       - *target-not-arm64ec
     static_defs: ['const LANE: i32']
     safety: safe
+    big_endian_inverse: true
     types:
       - [float16x4_t, f16, '_lane_f16', '2']
       - [float16x8_t, f16, 'q_lane_f16', '3']
@@ -14928,6 +14985,7 @@ intrinsics:
     arguments: ["v: {neon_type[1]}"]
     return_type: "{type[2]}"
     safety: safe
+    big_endian_inverse: true
     static_defs: ['const IMM5: i32']
     attr:
       - *neon-v7
@@ -14966,6 +15024,7 @@ intrinsics:
     arguments: ["v: {neon_type[1]}"]
     return_type: "{type[2]}"
     safety: safe
+    big_endian_inverse: true
     static_defs: ['const IMM5: i32']
     attr:
       - *neon-v7
@@ -15012,6 +15071,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - ['vget_high_s64', 'int64x2_t', 'int64x1_t', 'vmov', 'ext', 'int64x1_t([simd_extract!(a, 1)])']
       - ['vget_high_u64', 'uint64x2_t', 'uint64x1_t', 'vmov', 'ext', 'uint64x1_t([simd_extract!(a, 1)])']
@@ -15029,6 +15089,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - ['vget_low_s64', 'int64x2_t', 'int64x1_t', 'int64x1_t([simd_extract!(a, 0)])']
       - ['vget_low_u64', 'uint64x2_t', 'uint64x1_t', 'uint64x1_t([simd_extract!(a, 0)])']
@@ -15047,6 +15108,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - ['vget_high_s8', 'int8x16_t', 'int8x8_t', 'vmov', 'ext', '[8, 9, 10, 11, 12, 13, 14, 15]']
       - ['vget_high_u8', 'uint8x16_t', 'uint8x8_t', 'vmov', 'ext', '[8, 9, 10, 11, 12, 13, 14, 15]']
@@ -15070,6 +15132,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - ['vget_low_s8', 'int8x16_t', 'int8x8_t', '[0, 1, 2, 3, 4, 5, 6, 7]']
       - ['vget_low_u8', 'uint8x16_t', 'uint8x8_t', '[0, 1, 2, 3, 4, 5, 6, 7]']
@@ -15222,6 +15285,7 @@ intrinsics:
       - *neon-not-arm-stable
       - *neon-cfg-arm-unstable
     safety: safe
+    big_endian_inverse: true
     types:
       - ['vrev16_s8', 'int8x8_t', 'vrev16.8', 'rev16', '[1, 0, 3, 2, 5, 4, 7, 6]']
       - ['vrev16q_s8', 'int8x16_t', 'vrev16.8', 'rev16', '[1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]']